From da26716a7d28f4830fa877597fb9c0e2d8cbc73c Mon Sep 17 00:00:00 2001
From: Tamal Saha
Date: Wed, 15 Jan 2020 04:49:10 -0800
Subject: [PATCH] Use sigs.k8s.io/sig-storage-lib-external-provisioner

- Uses PV controller from new library location
- Included fix for k8s 1.11
  ref: https://github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/pull/60

Signed-off-by: Tamal Saha
---
 go.mod | 21 +- go.sum | 141 +- main.go | 2 +- provisioner.go | 6 +- vendor/github.com/ghodss/yaml/.travis.yml | 7 - vendor/github.com/golang/glog/README | 44 - vendor/github.com/google/btree/.travis.yml | 1 - vendor/github.com/google/btree/README.md | 12 - vendor/github.com/google/btree/btree.go | 890 - vendor/github.com/google/btree/btree_mem.go | 76 - .../{pborman/uuid => google/go-cmp}/LICENSE | 2 +- .../github.com/google/go-cmp/cmp/compare.go | 616 + .../google/go-cmp/cmp/export_panic.go | 15 + .../google/go-cmp/cmp/export_unsafe.go | 23 + .../go-cmp/cmp/internal/diff/debug_disable.go | 17 + .../go-cmp/cmp/internal/diff/debug_enable.go | 122 + .../google/go-cmp/cmp/internal/diff/diff.go | 372 + .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../cmp/internal/flags/toolchain_legacy.go | 10 + .../cmp/internal/flags/toolchain_recent.go | 10 + .../go-cmp/cmp/internal/function/func.go | 99 + .../cmp/internal/value/pointer_purego.go | 23 + .../cmp/internal/value/pointer_unsafe.go | 26 + .../google/go-cmp/cmp/internal/value/sort.go | 104 + .../google/go-cmp/cmp/internal/value/zero.go | 45 + .../github.com/google/go-cmp/cmp/options.go | 524 + vendor/github.com/google/go-cmp/cmp/path.go | 308 + vendor/github.com/google/go-cmp/cmp/report.go | 51 + .../google/go-cmp/cmp/report_compare.go | 296 + .../google/go-cmp/cmp/report_reflect.go | 279 + .../google/go-cmp/cmp/report_slices.go | 333 + .../google/go-cmp/cmp/report_text.go | 382 + .../google/go-cmp/cmp/report_value.go | 121 + vendor/github.com/google/uuid/go.mod | 1 + vendor/github.com/google/uuid/node.go | 1 + vendor/github.com/google/uuid/uuid.go | 73 +- .../gregjones/httpcache/.travis.yml | 18 - .../gregjones/httpcache/LICENSE.txt | 7 - .../github.com/gregjones/httpcache/README.md | 29 - .../httpcache/diskcache/diskcache.go | 61 - .../gregjones/httpcache/httpcache.go | 551 - vendor/github.com/json-iterator/go/iter.go | 27 + .../github.com/json-iterator/go/iter_array.go | 10 +- .../json-iterator/go/iter_object.go | 24 +- .../json-iterator/go/iter_skip_sloppy.go | 19 + vendor/github.com/json-iterator/go/reflect.go | 5 + .../json-iterator/go/reflect_extension.go | 2 +- .../json-iterator/go/reflect_map.go | 4 + .../json-iterator/go/reflect_marshaler.go | 12 +- .../go/reflect_struct_decoder.go | 44 + .../external-storage/lib/util/util.go | 67 - vendor/github.com/miekg/dns/.codecov.yml | 8 + vendor/github.com/miekg/dns/.gitignore | 4 + vendor/github.com/miekg/dns/.travis.yml | 17 + vendor/github.com/miekg/dns/AUTHORS | 1 + vendor/github.com/miekg/dns/CODEOWNERS | 1 + vendor/github.com/miekg/dns/CONTRIBUTORS | 10 + vendor/github.com/miekg/dns/COPYRIGHT | 9 + vendor/github.com/miekg/dns/LICENSE | 30 + vendor/github.com/miekg/dns/Makefile.fuzz | 33 + vendor/github.com/miekg/dns/Makefile.release | 52 + vendor/github.com/miekg/dns/README.md | 175 + vendor/github.com/miekg/dns/acceptfunc.go | 61 + vendor/github.com/miekg/dns/client.go | 415 + vendor/github.com/miekg/dns/clientconfig.go | 135 + vendor/github.com/miekg/dns/dane.go | 43 + vendor/github.com/miekg/dns/defaults.go | 378 + vendor/github.com/miekg/dns/dns.go | 134 + vendor/github.com/miekg/dns/dnssec.go | 794 + 
vendor/github.com/miekg/dns/dnssec_keygen.go | 140 + vendor/github.com/miekg/dns/dnssec_keyscan.go | 322 + vendor/github.com/miekg/dns/dnssec_privkey.go | 94 + vendor/github.com/miekg/dns/doc.go | 268 + vendor/github.com/miekg/dns/duplicate.go | 38 + vendor/github.com/miekg/dns/edns.go | 675 + vendor/github.com/miekg/dns/format.go | 93 + vendor/github.com/miekg/dns/fuzz.go | 32 + vendor/github.com/miekg/dns/generate.go | 247 + vendor/github.com/miekg/dns/go.mod | 11 + vendor/github.com/miekg/dns/go.sum | 39 + vendor/github.com/miekg/dns/labels.go | 212 + vendor/github.com/miekg/dns/listen_go111.go | 44 + .../github.com/miekg/dns/listen_go_not111.go | 23 + vendor/github.com/miekg/dns/msg.go | 1196 + vendor/github.com/miekg/dns/msg_helpers.go | 810 + vendor/github.com/miekg/dns/msg_truncate.go | 111 + vendor/github.com/miekg/dns/nsecx.go | 95 + vendor/github.com/miekg/dns/privaterr.go | 114 + vendor/github.com/miekg/dns/reverse.go | 52 + vendor/github.com/miekg/dns/sanitize.go | 86 + vendor/github.com/miekg/dns/scan.go | 1408 + vendor/github.com/miekg/dns/scan_rr.go | 1764 + vendor/github.com/miekg/dns/serve_mux.go | 123 + vendor/github.com/miekg/dns/server.go | 764 + vendor/github.com/miekg/dns/sig0.go | 209 + vendor/github.com/miekg/dns/singleinflight.go | 61 + vendor/github.com/miekg/dns/smimea.go | 44 + vendor/github.com/miekg/dns/tlsa.go | 44 + vendor/github.com/miekg/dns/tsig.go | 389 + vendor/github.com/miekg/dns/types.go | 1527 + vendor/github.com/miekg/dns/udp.go | 102 + vendor/github.com/miekg/dns/udp_windows.go | 35 + vendor/github.com/miekg/dns/update.go | 110 + vendor/github.com/miekg/dns/version.go | 15 + vendor/github.com/miekg/dns/xfr.go | 266 + vendor/github.com/miekg/dns/zduplicate.go | 1157 + vendor/github.com/miekg/dns/zmsg.go | 2741 + vendor/github.com/miekg/dns/ztypes.go | 898 + vendor/github.com/pborman/uuid/.travis.yml | 10 - .../github.com/pborman/uuid/CONTRIBUTING.md | 10 - vendor/github.com/pborman/uuid/CONTRIBUTORS | 1 - vendor/github.com/pborman/uuid/README.md | 15 - vendor/github.com/pborman/uuid/dce.go | 84 - vendor/github.com/pborman/uuid/doc.go | 13 - vendor/github.com/pborman/uuid/go.mod | 3 - vendor/github.com/pborman/uuid/go.sum | 2 - vendor/github.com/pborman/uuid/hash.go | 53 - vendor/github.com/pborman/uuid/marshal.go | 85 - vendor/github.com/pborman/uuid/node.go | 50 - vendor/github.com/pborman/uuid/sql.go | 68 - vendor/github.com/pborman/uuid/time.go | 57 - vendor/github.com/pborman/uuid/util.go | 32 - vendor/github.com/pborman/uuid/uuid.go | 162 - vendor/github.com/pborman/uuid/version1.go | 23 - vendor/github.com/pborman/uuid/version4.go | 26 - vendor/github.com/peterbourgon/diskv/LICENSE | 19 - .../github.com/peterbourgon/diskv/README.md | 141 - .../peterbourgon/diskv/compression.go | 64 - vendor/github.com/peterbourgon/diskv/diskv.go | 624 - vendor/github.com/peterbourgon/diskv/index.go | 115 - vendor/golang.org/x/crypto/ed25519/ed25519.go | 222 + .../x/crypto/ed25519/ed25519_go113.go | 73 + .../ed25519/internal/edwards25519/const.go | 1422 + .../internal/edwards25519/edwards25519.go | 1793 + .../x/crypto/ssh/terminal/terminal.go | 67 +- vendor/golang.org/x/net/bpf/asm.go | 41 + vendor/golang.org/x/net/bpf/constants.go | 222 + vendor/golang.org/x/net/bpf/doc.go | 82 + vendor/golang.org/x/net/bpf/instructions.go | 726 + vendor/golang.org/x/net/bpf/setter.go | 10 + vendor/golang.org/x/net/bpf/vm.go | 150 + .../golang.org/x/net/bpf/vm_instructions.go | 182 + vendor/golang.org/x/net/http2/server.go | 46 +- vendor/golang.org/x/net/http2/transport.go | 
2 +- vendor/golang.org/x/net/http2/writesched.go | 8 +- .../x/net/http2/writesched_random.go | 9 +- .../golang.org/x/net/internal/iana/const.go | 223 + .../x/net/internal/socket/cmsghdr.go | 11 + .../x/net/internal/socket/cmsghdr_bsd.go | 13 + .../internal/socket/cmsghdr_linux_32bit.go | 14 + .../internal/socket/cmsghdr_linux_64bit.go | 14 + .../internal/socket/cmsghdr_solaris_64bit.go | 14 + .../x/net/internal/socket/cmsghdr_stub.go | 17 + .../golang.org/x/net/internal/socket/empty.s | 7 + .../x/net/internal/socket/error_unix.go | 31 + .../x/net/internal/socket/error_windows.go | 26 + .../x/net/internal/socket/iovec_32bit.go | 19 + .../x/net/internal/socket/iovec_64bit.go | 19 + .../internal/socket/iovec_solaris_64bit.go | 19 + .../x/net/internal/socket/iovec_stub.go | 11 + .../x/net/internal/socket/mmsghdr_stub.go | 21 + .../x/net/internal/socket/mmsghdr_unix.go | 42 + .../x/net/internal/socket/msghdr_bsd.go | 39 + .../x/net/internal/socket/msghdr_bsdvar.go | 16 + .../x/net/internal/socket/msghdr_linux.go | 36 + .../net/internal/socket/msghdr_linux_32bit.go | 24 + .../net/internal/socket/msghdr_linux_64bit.go | 24 + .../x/net/internal/socket/msghdr_openbsd.go | 14 + .../internal/socket/msghdr_solaris_64bit.go | 36 + .../x/net/internal/socket/msghdr_stub.go | 14 + .../x/net/internal/socket/rawconn.go | 64 + .../x/net/internal/socket/rawconn_mmsg.go | 73 + .../x/net/internal/socket/rawconn_msg.go | 76 + .../x/net/internal/socket/rawconn_nommsg.go | 15 + .../x/net/internal/socket/rawconn_nomsg.go | 15 + .../x/net/internal/socket/socket.go | 288 + .../golang.org/x/net/internal/socket/sys.go | 33 + .../x/net/internal/socket/sys_bsd.go | 15 + .../x/net/internal/socket/sys_bsdvar.go | 23 + .../x/net/internal/socket/sys_const_unix.go | 17 + .../x/net/internal/socket/sys_darwin.go | 7 + .../x/net/internal/socket/sys_dragonfly.go | 7 + .../net/internal/socket/sys_go1_11_darwin.go | 33 + .../x/net/internal/socket/sys_linkname.go | 42 + .../x/net/internal/socket/sys_linux.go | 27 + .../x/net/internal/socket/sys_linux_386.go | 55 + .../x/net/internal/socket/sys_linux_386.s | 11 + .../x/net/internal/socket/sys_linux_amd64.go | 10 + .../x/net/internal/socket/sys_linux_arm.go | 10 + .../x/net/internal/socket/sys_linux_arm64.go | 10 + .../x/net/internal/socket/sys_linux_mips.go | 10 + .../x/net/internal/socket/sys_linux_mips64.go | 10 + .../net/internal/socket/sys_linux_mips64le.go | 10 + .../x/net/internal/socket/sys_linux_mipsle.go | 10 + .../x/net/internal/socket/sys_linux_ppc64.go | 10 + .../net/internal/socket/sys_linux_ppc64le.go | 10 + .../net/internal/socket/sys_linux_riscv64.go | 12 + .../x/net/internal/socket/sys_linux_s390x.go | 55 + .../x/net/internal/socket/sys_linux_s390x.s | 11 + .../x/net/internal/socket/sys_netbsd.go | 25 + .../x/net/internal/socket/sys_posix.go | 183 + .../x/net/internal/socket/sys_solaris.go | 70 + .../x/net/internal/socket/sys_solaris_amd64.s | 11 + .../x/net/internal/socket/sys_stub.go | 63 + .../x/net/internal/socket/sys_unix.go | 33 + .../x/net/internal/socket/sys_windows.go | 71 + .../x/net/internal/socket/zsys_aix_ppc64.go | 61 + .../x/net/internal/socket/zsys_darwin_386.go | 51 + .../net/internal/socket/zsys_darwin_amd64.go | 53 + .../x/net/internal/socket/zsys_darwin_arm.go | 51 + .../net/internal/socket/zsys_darwin_arm64.go | 53 + .../internal/socket/zsys_dragonfly_amd64.go | 53 + .../x/net/internal/socket/zsys_freebsd_386.go | 51 + .../net/internal/socket/zsys_freebsd_amd64.go | 53 + .../x/net/internal/socket/zsys_freebsd_arm.go | 51 + 
.../net/internal/socket/zsys_freebsd_arm64.go | 53 + .../x/net/internal/socket/zsys_linux_386.go | 55 + .../x/net/internal/socket/zsys_linux_amd64.go | 58 + .../x/net/internal/socket/zsys_linux_arm.go | 55 + .../x/net/internal/socket/zsys_linux_arm64.go | 58 + .../x/net/internal/socket/zsys_linux_mips.go | 55 + .../net/internal/socket/zsys_linux_mips64.go | 58 + .../internal/socket/zsys_linux_mips64le.go | 58 + .../net/internal/socket/zsys_linux_mipsle.go | 55 + .../x/net/internal/socket/zsys_linux_ppc64.go | 58 + .../net/internal/socket/zsys_linux_ppc64le.go | 58 + .../net/internal/socket/zsys_linux_riscv64.go | 59 + .../x/net/internal/socket/zsys_linux_s390x.go | 58 + .../x/net/internal/socket/zsys_netbsd_386.go | 57 + .../net/internal/socket/zsys_netbsd_amd64.go | 60 + .../x/net/internal/socket/zsys_netbsd_arm.go | 57 + .../net/internal/socket/zsys_netbsd_arm64.go | 60 + .../x/net/internal/socket/zsys_openbsd_386.go | 51 + .../net/internal/socket/zsys_openbsd_amd64.go | 53 + .../x/net/internal/socket/zsys_openbsd_arm.go | 51 + .../net/internal/socket/zsys_openbsd_arm64.go | 53 + .../net/internal/socket/zsys_solaris_amd64.go | 52 + vendor/golang.org/x/net/ipv4/batch.go | 194 + vendor/golang.org/x/net/ipv4/control.go | 144 + vendor/golang.org/x/net/ipv4/control_bsd.go | 40 + .../golang.org/x/net/ipv4/control_pktinfo.go | 39 + vendor/golang.org/x/net/ipv4/control_stub.go | 13 + vendor/golang.org/x/net/ipv4/control_unix.go | 73 + .../golang.org/x/net/ipv4/control_windows.go | 12 + vendor/golang.org/x/net/ipv4/dgramopt.go | 264 + vendor/golang.org/x/net/ipv4/doc.go | 244 + vendor/golang.org/x/net/ipv4/endpoint.go | 186 + vendor/golang.org/x/net/ipv4/genericopt.go | 55 + vendor/golang.org/x/net/ipv4/header.go | 173 + vendor/golang.org/x/net/ipv4/helper.go | 80 + vendor/golang.org/x/net/ipv4/iana.go | 38 + vendor/golang.org/x/net/ipv4/icmp.go | 57 + vendor/golang.org/x/net/ipv4/icmp_linux.go | 25 + vendor/golang.org/x/net/ipv4/icmp_stub.go | 25 + vendor/golang.org/x/net/ipv4/packet.go | 117 + vendor/golang.org/x/net/ipv4/payload.go | 23 + vendor/golang.org/x/net/ipv4/payload_cmsg.go | 84 + .../golang.org/x/net/ipv4/payload_nocmsg.go | 39 + vendor/golang.org/x/net/ipv4/sockopt.go | 44 + vendor/golang.org/x/net/ipv4/sockopt_posix.go | 71 + vendor/golang.org/x/net/ipv4/sockopt_stub.go | 42 + vendor/golang.org/x/net/ipv4/sys_aix.go | 38 + vendor/golang.org/x/net/ipv4/sys_asmreq.go | 119 + .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 + vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 42 + .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_bpf.go | 23 + vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv4/sys_bsd.go | 37 + vendor/golang.org/x/net/ipv4/sys_darwin.go | 65 + vendor/golang.org/x/net/ipv4/sys_dragonfly.go | 35 + vendor/golang.org/x/net/ipv4/sys_freebsd.go | 76 + vendor/golang.org/x/net/ipv4/sys_linux.go | 59 + vendor/golang.org/x/net/ipv4/sys_solaris.go | 57 + vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 52 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_stub.go | 13 + vendor/golang.org/x/net/ipv4/sys_windows.go | 67 + .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 33 + vendor/golang.org/x/net/ipv4/zsys_darwin.go | 99 + .../golang.org/x/net/ipv4/zsys_dragonfly.go | 31 + .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 93 + .../x/net/ipv4/zsys_freebsd_amd64.go | 95 + .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 95 + .../golang.org/x/net/ipv4/zsys_linux_386.go | 148 + 
.../golang.org/x/net/ipv4/zsys_linux_amd64.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_arm.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_mips.go | 148 + .../x/net/ipv4/zsys_linux_mips64.go | 150 + .../x/net/ipv4/zsys_linux_mips64le.go | 150 + .../x/net/ipv4/zsys_linux_mipsle.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 150 + .../x/net/ipv4/zsys_linux_ppc64le.go | 150 + .../x/net/ipv4/zsys_linux_riscv64.go | 151 + .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 150 + vendor/golang.org/x/net/ipv4/zsys_netbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_openbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_solaris.go | 100 + vendor/golang.org/x/net/ipv6/batch.go | 116 + vendor/golang.org/x/net/ipv6/control.go | 187 + .../x/net/ipv6/control_rfc2292_unix.go | 48 + .../x/net/ipv6/control_rfc3542_unix.go | 94 + vendor/golang.org/x/net/ipv6/control_stub.go | 13 + vendor/golang.org/x/net/ipv6/control_unix.go | 55 + .../golang.org/x/net/ipv6/control_windows.go | 12 + vendor/golang.org/x/net/ipv6/dgramopt.go | 301 + vendor/golang.org/x/net/ipv6/doc.go | 243 + vendor/golang.org/x/net/ipv6/endpoint.go | 127 + vendor/golang.org/x/net/ipv6/genericopt.go | 56 + vendor/golang.org/x/net/ipv6/header.go | 55 + vendor/golang.org/x/net/ipv6/helper.go | 59 + vendor/golang.org/x/net/ipv6/iana.go | 86 + vendor/golang.org/x/net/ipv6/icmp.go | 60 + vendor/golang.org/x/net/ipv6/icmp_bsd.go | 29 + vendor/golang.org/x/net/ipv6/icmp_linux.go | 27 + vendor/golang.org/x/net/ipv6/icmp_solaris.go | 27 + vendor/golang.org/x/net/ipv6/icmp_stub.go | 23 + vendor/golang.org/x/net/ipv6/icmp_windows.go | 22 + vendor/golang.org/x/net/ipv6/payload.go | 23 + vendor/golang.org/x/net/ipv6/payload_cmsg.go | 70 + .../golang.org/x/net/ipv6/payload_nocmsg.go | 38 + vendor/golang.org/x/net/ipv6/sockopt.go | 43 + vendor/golang.org/x/net/ipv6/sockopt_posix.go | 89 + vendor/golang.org/x/net/ipv6/sockopt_stub.go | 46 + vendor/golang.org/x/net/ipv6/sys_aix.go | 77 + vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 + .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 + vendor/golang.org/x/net/ipv6/sys_bpf.go | 23 + vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv6/sys_bsd.go | 57 + vendor/golang.org/x/net/ipv6/sys_darwin.go | 78 + vendor/golang.org/x/net/ipv6/sys_freebsd.go | 92 + vendor/golang.org/x/net/ipv6/sys_linux.go | 74 + vendor/golang.org/x/net/ipv6/sys_solaris.go | 74 + vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv6/sys_stub.go | 13 + vendor/golang.org/x/net/ipv6/sys_windows.go | 75 + .../golang.org/x/net/ipv6/zsys_aix_ppc64.go | 103 + vendor/golang.org/x/net/ipv6/zsys_darwin.go | 131 + .../golang.org/x/net/ipv6/zsys_dragonfly.go | 88 + .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 122 + .../x/net/ipv6/zsys_freebsd_amd64.go | 124 + .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 124 + .../golang.org/x/net/ipv6/zsys_linux_386.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_arm.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_mips.go | 170 + .../x/net/ipv6/zsys_linux_mips64.go | 172 + .../x/net/ipv6/zsys_linux_mips64le.go | 172 + .../x/net/ipv6/zsys_linux_mipsle.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 172 + 
.../x/net/ipv6/zsys_linux_ppc64le.go | 172 + .../x/net/ipv6/zsys_linux_riscv64.go | 173 + .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 172 + vendor/golang.org/x/net/ipv6/zsys_netbsd.go | 84 + vendor/golang.org/x/net/ipv6/zsys_openbsd.go | 93 + vendor/golang.org/x/net/ipv6/zsys_solaris.go | 131 + vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 - vendor/golang.org/x/sys/unix/mkpost.go | 122 - vendor/golang.org/x/sys/unix/mksyscall.go | 407 - .../x/sys/unix/mksyscall_aix_ppc.go | 415 - .../x/sys/unix/mksyscall_aix_ppc64.go | 614 - .../x/sys/unix/mksyscall_solaris.go | 335 - .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 - vendor/golang.org/x/sys/unix/mksysnum.go | 190 - vendor/golang.org/x/sys/unix/types_aix.go | 237 - vendor/golang.org/x/sys/unix/types_darwin.go | 283 - .../golang.org/x/sys/unix/types_dragonfly.go | 263 - vendor/golang.org/x/sys/unix/types_freebsd.go | 400 - vendor/golang.org/x/sys/unix/types_netbsd.go | 290 - vendor/golang.org/x/sys/unix/types_openbsd.go | 283 - vendor/golang.org/x/sys/unix/types_solaris.go | 266 - .../golang.org/x/text/transform/transform.go | 6 +- vendor/golang.org/x/text/unicode/bidi/bidi.go | 2 +- .../golang.org/x/text/unicode/bidi/bracket.go | 4 +- vendor/golang.org/x/text/unicode/bidi/core.go | 2 +- vendor/golang.org/x/text/unicode/bidi/gen.go | 133 - .../x/text/unicode/bidi/gen_ranges.go | 57 - .../x/text/unicode/bidi/gen_trieval.go | 64 - .../x/text/unicode/bidi/tables10.0.0.go | 2 +- .../x/text/unicode/bidi/tables11.0.0.go | 1887 + .../x/text/unicode/norm/composition.go | 8 +- .../x/text/unicode/norm/forminfo.go | 19 + vendor/golang.org/x/text/unicode/norm/iter.go | 3 +- .../x/text/unicode/norm/maketables.go | 976 - .../x/text/unicode/norm/normalize.go | 4 +- .../x/text/unicode/norm/readwriter.go | 4 +- .../x/text/unicode/norm/tables10.0.0.go | 1892 +- .../x/text/unicode/norm/tables11.0.0.go | 7693 +++ .../x/text/unicode/norm/tables9.0.0.go | 1890 +- .../x/text/unicode/norm/transform.go | 10 +- .../golang.org/x/text/unicode/norm/triegen.go | 117 - .../appengine/internal/api.go | 4 + vendor/gopkg.in/yaml.v2/decode.go | 38 + vendor/gopkg.in/yaml.v2/resolve.go | 2 +- vendor/gopkg.in/yaml.v2/scannerc.go | 16 + .../{v1alpha1 => v1}/doc.go | 11 +- .../admissionregistration/v1/generated.pb.go | 3469 ++ .../admissionregistration/v1/generated.proto | 479 + .../{v1alpha1 => v1}/register.go | 12 +- .../api/admissionregistration/v1/types.go | 551 + .../v1/types_swagger_doc_generated.go | 151 + .../v1/zz_generated.deepcopy.go | 396 + .../v1alpha1/generated.pb.go | 1027 - .../v1alpha1/generated.proto | 107 - .../admissionregistration/v1alpha1/types.go | 106 - .../v1alpha1/types_swagger_doc_generated.go | 71 - .../api/admissionregistration/v1beta1/doc.go | 5 +- .../v1beta1/generated.pb.go | 2760 +- .../v1beta1/generated.proto | 318 +- .../admissionregistration/v1beta1/types.go | 287 +- .../v1beta1/types_swagger_doc_generated.go | 71 +- .../v1beta1/zz_generated.deepcopy.go | 180 +- vendor/k8s.io/api/apps/v1/doc.go | 1 + vendor/k8s.io/api/apps/v1/generated.pb.go | 3657 +- vendor/k8s.io/api/apps/v1/generated.proto | 21 +- vendor/k8s.io/api/apps/v1/types.go | 35 +- .../apps/v1/types_swagger_doc_generated.go | 22 +- .../api/apps/v1/zz_generated.deepcopy.go | 10 +- vendor/k8s.io/api/apps/v1beta1/doc.go | 1 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 2986 +- .../k8s.io/api/apps/v1beta1/generated.proto | 13 +- vendor/k8s.io/api/apps/v1beta1/types.go | 25 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 6 
+- vendor/k8s.io/api/apps/v1beta2/doc.go | 1 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 4096 +- .../k8s.io/api/apps/v1beta2/generated.proto | 29 +- vendor/k8s.io/api/apps/v1beta2/types.go | 41 +- .../v1beta2/types_swagger_doc_generated.go | 30 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 10 +- .../api/auditregistration/v1alpha1/doc.go | 23 + .../v1alpha1/generated.pb.go | 2056 + .../v1alpha1/generated.proto | 162 + .../auditregistration/v1alpha1/register.go | 56 + .../api/auditregistration/v1alpha1/types.go | 198 + .../v1alpha1/types_swagger_doc_generated.go | 111 + .../v1alpha1/zz_generated.deepcopy.go | 229 + vendor/k8s.io/api/authentication/v1/doc.go | 2 + .../api/authentication/v1/generated.pb.go | 1356 +- .../api/authentication/v1/generated.proto | 25 +- vendor/k8s.io/api/authentication/v1/types.go | 23 +- .../v1/types_swagger_doc_generated.go | 8 +- .../v1/zz_generated.deepcopy.go | 12 +- .../k8s.io/api/authentication/v1beta1/doc.go | 2 + .../authentication/v1beta1/generated.pb.go | 907 +- .../authentication/v1beta1/generated.proto | 20 + .../api/authentication/v1beta1/types.go | 18 + .../v1beta1/types_swagger_doc_generated.go | 6 +- .../v1beta1/zz_generated.deepcopy.go | 12 +- vendor/k8s.io/api/authorization/v1/doc.go | 2 + .../api/authorization/v1/generated.pb.go | 1951 +- .../k8s.io/api/authorization/v1beta1/doc.go | 2 + .../api/authorization/v1beta1/generated.pb.go | 1951 +- vendor/k8s.io/api/autoscaling/v1/doc.go | 1 + .../k8s.io/api/autoscaling/v1/generated.pb.go | 2522 +- .../k8s.io/api/autoscaling/v1/generated.proto | 18 +- vendor/k8s.io/api/autoscaling/v1/types.go | 24 +- .../v1/types_swagger_doc_generated.go | 22 +- .../autoscaling/v1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 1 + .../api/autoscaling/v2beta1/generated.pb.go | 2291 +- .../api/autoscaling/v2beta1/generated.proto | 13 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 19 +- .../v2beta1/types_swagger_doc_generated.go | 16 +- .../v2beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 1 + .../api/autoscaling/v2beta2/generated.pb.go | 2376 +- .../api/autoscaling/v2beta2/generated.proto | 13 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 21 +- .../v2beta2/types_swagger_doc_generated.go | 8 +- .../v2beta2/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v1/doc.go | 1 + vendor/k8s.io/api/batch/v1/generated.pb.go | 884 +- vendor/k8s.io/api/batch/v1/generated.proto | 8 +- vendor/k8s.io/api/batch/v1/types.go | 8 +- .../batch/v1/types_swagger_doc_generated.go | 8 +- .../api/batch/v1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v1beta1/doc.go | 1 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 838 +- .../k8s.io/api/batch/v1beta1/generated.proto | 16 +- vendor/k8s.io/api/batch/v1beta1/types.go | 16 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../batch/v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v2alpha1/doc.go | 1 + .../k8s.io/api/batch/v2alpha1/generated.pb.go | 838 +- .../k8s.io/api/batch/v2alpha1/generated.proto | 16 +- vendor/k8s.io/api/batch/v2alpha1/types.go | 16 +- .../v2alpha1/types_swagger_doc_generated.go | 16 +- .../batch/v2alpha1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/certificates/v1beta1/doc.go | 2 + .../api/certificates/v1beta1/generated.pb.go | 1009 +- .../k8s.io/api/certificates/v1beta1/types.go | 46 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/coordination/v1/doc.go | 23 + .../api/coordination/v1/generated.pb.go | 1002 + 
.../api/coordination/v1/generated.proto | 80 + vendor/k8s.io/api/coordination/v1/register.go | 53 + vendor/k8s.io/api/coordination/v1/types.go | 74 + .../v1/types_swagger_doc_generated.go | 63 + .../coordination/v1/zz_generated.deepcopy.go | 124 + vendor/k8s.io/api/coordination/v1beta1/doc.go | 2 + .../api/coordination/v1beta1/generated.pb.go | 489 +- .../api/coordination/v1beta1/generated.proto | 6 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- .../api/core/v1/annotation_key_constants.go | 25 + vendor/k8s.io/api/core/v1/doc.go | 1 + vendor/k8s.io/api/core/v1/generated.pb.go | 51783 ++++++++++------ vendor/k8s.io/api/core/v1/generated.proto | 807 +- vendor/k8s.io/api/core/v1/register.go | 1 + vendor/k8s.io/api/core/v1/taint.go | 8 +- vendor/k8s.io/api/core/v1/types.go | 860 +- .../core/v1/types_swagger_doc_generated.go | 455 +- .../k8s.io/api/core/v1/well_known_labels.go | 50 + .../k8s.io/api/core/v1/well_known_taints.go | 55 + .../api/core/v1/zz_generated.deepcopy.go | 412 +- vendor/k8s.io/api/discovery/v1alpha1/doc.go | 22 + .../api/discovery/v1alpha1/generated.pb.go | 1730 + .../api/discovery/v1alpha1/generated.proto | 157 + .../discovery/v1alpha1}/register.go | 66 +- vendor/k8s.io/api/discovery/v1alpha1/types.go | 162 + .../v1alpha1/types_swagger_doc_generated.go | 86 + .../discovery/v1alpha1/well_known_labels.go | 28 + .../v1alpha1/zz_generated.deepcopy.go | 195 + vendor/k8s.io/api/discovery/v1beta1/doc.go | 22 + .../api/discovery/v1beta1/generated.pb.go | 1730 + .../api/discovery/v1beta1/generated.proto | 157 + .../k8s.io/api/discovery/v1beta1/register.go | 56 + vendor/k8s.io/api/discovery/v1beta1/types.go | 162 + .../v1beta1/types_swagger_doc_generated.go | 86 + .../discovery/v1beta1/well_known_labels.go | 28 + .../v1beta1/zz_generated.deepcopy.go | 195 + vendor/k8s.io/api/events/v1beta1/doc.go | 2 + .../k8s.io/api/events/v1beta1/generated.pb.go | 708 +- .../k8s.io/api/events/v1beta1/generated.proto | 3 +- vendor/k8s.io/api/events/v1beta1/types.go | 7 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../events/v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/extensions/v1beta1/doc.go | 1 + .../api/extensions/v1beta1/generated.pb.go | 9130 ++- .../api/extensions/v1beta1/generated.proto | 133 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 146 +- .../v1beta1/types_swagger_doc_generated.go | 101 +- .../v1beta1/zz_generated.deepcopy.go | 162 +- vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go | 24 + .../api/flowcontrol/v1alpha1/generated.pb.go | 5459 ++ .../api/flowcontrol/v1alpha1/generated.proto | 436 + .../api/flowcontrol/v1alpha1/register.go | 58 + .../k8s.io/api/flowcontrol/v1alpha1/types.go | 513 + .../v1alpha1/types_swagger_doc_generated.go | 258 + .../v1alpha1/zz_generated.deepcopy.go | 541 + vendor/k8s.io/api/networking/v1/doc.go | 2 + .../k8s.io/api/networking/v1/generated.pb.go | 1076 +- .../k8s.io/api/networking/v1/generated.proto | 12 +- vendor/k8s.io/api/networking/v1/types.go | 8 +- .../v1/types_swagger_doc_generated.go | 8 +- .../networking/v1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/networking/v1beta1/doc.go | 22 + .../api/networking/v1beta1/generated.pb.go | 2394 + .../api/networking/v1beta1/generated.proto | 186 + .../k8s.io/api/networking/v1beta1/register.go | 56 + vendor/k8s.io/api/networking/v1beta1/types.go | 192 + .../v1beta1/types_swagger_doc_generated.go | 127 + .../v1beta1/zz_generated.deepcopy.go | 252 + 
vendor/k8s.io/api/node/v1alpha1/doc.go | 23 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 1609 + .../k8s.io/api/node/v1alpha1/generated.proto | 118 + vendor/k8s.io/api/node/v1alpha1/register.go | 52 + vendor/k8s.io/api/node/v1alpha1/types.go | 116 + .../v1alpha1/types_swagger_doc_generated.go | 80 + .../node/v1alpha1/zz_generated.deepcopy.go | 165 + vendor/k8s.io/api/node/v1beta1/doc.go | 23 + .../k8s.io/api/node/v1beta1/generated.pb.go | 1438 + .../k8s.io/api/node/v1beta1/generated.proto | 108 + vendor/k8s.io/api/node/v1beta1/register.go | 52 + vendor/k8s.io/api/node/v1beta1/types.go | 106 + .../v1beta1/types_swagger_doc_generated.go | 71 + .../v1beta1}/zz_generated.deepcopy.go | 93 +- vendor/k8s.io/api/policy/v1beta1/doc.go | 3 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 3221 +- .../k8s.io/api/policy/v1beta1/generated.proto | 71 +- vendor/k8s.io/api/policy/v1beta1/types.go | 102 +- .../v1beta1/types_swagger_doc_generated.go | 38 +- .../policy/v1beta1/zz_generated.deepcopy.go | 82 +- vendor/k8s.io/api/rbac/v1/doc.go | 2 + vendor/k8s.io/api/rbac/v1/generated.pb.go | 1530 +- vendor/k8s.io/api/rbac/v1/generated.proto | 2 + vendor/k8s.io/api/rbac/v1/types.go | 2 + .../rbac/v1/types_swagger_doc_generated.go | 2 +- .../api/rbac/v1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/rbac/v1alpha1/doc.go | 2 + .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 1532 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 16 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 16 +- .../v1alpha1/types_swagger_doc_generated.go | 18 +- .../rbac/v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/rbac/v1beta1/doc.go | 2 + .../k8s.io/api/rbac/v1beta1/generated.pb.go | 1530 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 14 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 18 +- .../api/rbac/v1beta1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/scheduling/v1/doc.go | 23 + .../k8s.io/api/scheduling/v1/generated.pb.go | 761 + .../k8s.io/api/scheduling/v1/generated.proto | 75 + vendor/k8s.io/api/scheduling/v1/register.go | 55 + vendor/k8s.io/api/scheduling/v1/types.go | 74 + .../v1/types_swagger_doc_generated.go | 53 + .../scheduling/v1/zz_generated.deepcopy.go | 90 + vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 2 + .../api/scheduling/v1alpha1/generated.pb.go | 395 +- .../api/scheduling/v1alpha1/generated.proto | 13 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 13 +- .../v1alpha1/types_swagger_doc_generated.go | 13 +- .../v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/scheduling/v1beta1/doc.go | 2 + .../api/scheduling/v1beta1/generated.pb.go | 395 +- .../api/scheduling/v1beta1/generated.proto | 13 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 13 +- .../v1beta1/types_swagger_doc_generated.go | 13 +- .../v1beta1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/settings/v1alpha1/doc.go | 2 + .../api/settings/v1alpha1/generated.pb.go | 558 +- .../api/settings/v1alpha1/generated.proto | 2 +- vendor/k8s.io/api/settings/v1alpha1/types.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/storage/v1/doc.go | 4 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 3656 +- vendor/k8s.io/api/storage/v1/generated.proto | 195 +- vendor/k8s.io/api/storage/v1/register.go | 6 + vendor/k8s.io/api/storage/v1/types.go | 214 +- .../storage/v1/types_swagger_doc_generated.go | 117 +- .../api/storage/v1/zz_generated.deepcopy.go | 286 +- vendor/k8s.io/api/storage/v1alpha1/doc.go | 4 +- 
.../api/storage/v1alpha1/generated.pb.go | 989 +- .../api/storage/v1alpha1/generated.proto | 16 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 18 +- .../v1alpha1/types_swagger_doc_generated.go | 6 +- .../storage/v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/storage/v1beta1/doc.go | 2 + .../api/storage/v1beta1/generated.pb.go | 3876 +- .../api/storage/v1beta1/generated.proto | 200 +- vendor/k8s.io/api/storage/v1beta1/register.go | 6 + vendor/k8s.io/api/storage/v1beta1/types.go | 243 +- .../v1beta1/types_swagger_doc_generated.go | 93 +- .../storage/v1beta1/zz_generated.deepcopy.go | 230 +- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 3 +- .../apimachinery/pkg/api/errors/errors.go | 132 +- .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 6 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 79 +- .../k8s.io/apimachinery/pkg/api/meta/meta.go | 30 +- .../apimachinery/pkg/api/resource/OWNERS | 4 +- .../pkg/api/resource/generated.pb.go | 49 +- .../pkg/api/resource/generated.proto | 22 +- .../apimachinery/pkg/api/resource/math.go | 4 +- .../apimachinery/pkg/api/resource/quantity.go | 32 +- .../pkg/api/resource/quantity_proto.go | 34 +- .../apis/meta/internalversion/conversion.go | 54 - .../pkg/apis/meta/internalversion/doc.go | 2 +- .../pkg/apis/meta/internalversion/register.go | 32 +- .../pkg/apis/meta/internalversion/types.go | 12 +- .../zz_generated.conversion.go | 24 +- .../internalversion/zz_generated.deepcopy.go | 2 +- .../apimachinery/pkg/apis/meta/v1/OWNERS | 3 +- .../pkg/apis/meta/v1/controller_ref.go | 19 +- .../pkg/apis/meta/v1/conversion.go | 105 +- .../apimachinery/pkg/apis/meta/v1/deepcopy.go | 46 + .../apimachinery/pkg/apis/meta/v1/doc.go | 2 + .../apimachinery/pkg/apis/meta/v1/duration.go | 10 + .../pkg/apis/meta/v1/generated.pb.go | 6771 +- .../pkg/apis/meta/v1/generated.proto | 301 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 46 + .../apimachinery/pkg/apis/meta/v1/meta.go | 20 +- .../pkg/apis/meta/v1/micro_time.go | 31 +- .../pkg/apis/meta/v1/micro_time_proto.go | 8 + .../apimachinery/pkg/apis/meta/v1/register.go | 76 +- .../apimachinery/pkg/apis/meta/v1/time.go | 20 +- .../pkg/apis/meta/v1/time_proto.go | 12 +- .../apimachinery/pkg/apis/meta/v1/types.go | 414 +- .../meta/v1/types_swagger_doc_generated.go | 232 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 79 +- .../apis/meta/v1/unstructured/unstructured.go | 98 +- .../meta/v1/unstructured/unstructured_list.go | 22 + .../apis/meta/v1/zz_generated.conversion.go | 523 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 310 +- .../pkg/apis/meta/v1beta1/deepcopy.go | 27 - .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 3 +- .../pkg/apis/meta/v1beta1/generated.pb.go | 448 +- .../pkg/apis/meta/v1beta1/generated.proto | 27 +- .../pkg/apis/meta/v1beta1/register.go | 16 +- .../pkg/apis/meta/v1beta1/types.go | 143 +- .../v1beta1/types_swagger_doc_generated.go | 70 +- .../meta/v1beta1/zz_generated.deepcopy.go | 138 +- .../pkg/conversion/queryparams/convert.go | 4 - .../k8s.io/apimachinery/pkg/labels/labels.go | 2 +- .../apimachinery/pkg/labels/selector.go | 57 +- .../k8s.io/apimachinery/pkg/runtime/codec.go | 112 +- .../apimachinery/pkg/runtime/conversion.go | 97 +- .../apimachinery/pkg/runtime/converter.go | 14 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 29 + .../apimachinery/pkg/runtime/generated.pb.go | 381 +- .../apimachinery/pkg/runtime/generated.proto | 20 +- .../k8s.io/apimachinery/pkg/runtime/helper.go | 53 +- .../apimachinery/pkg/runtime/interfaces.go | 92 + .../k8s.io/apimachinery/pkg/runtime/mapper.go | 
98 + .../apimachinery/pkg/runtime/negotiate.go | 146 + .../apimachinery/pkg/runtime/register.go | 30 - .../pkg/runtime/schema/generated.pb.go | 23 +- .../pkg/runtime/schema/group_version.go | 24 +- .../pkg/runtime/serializer/codec_factory.go | 141 +- .../pkg/runtime/serializer/json/json.go | 143 +- .../runtime/serializer/protobuf/protobuf.go | 117 +- .../runtime/serializer/protobuf_extension.go | 48 - .../runtime/serializer/streaming/streaming.go | 2 +- .../serializer/versioning/versioning.go | 166 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 19 +- .../apimachinery/pkg/runtime/types_proto.go | 90 +- .../pkg/runtime/zz_generated.deepcopy.go | 33 - vendor/k8s.io/apimachinery/pkg/types/patch.go | 1 + .../apimachinery/pkg/util/cache/cache.go | 83 - .../apimachinery/pkg/util/cache/expiring.go | 192 + .../apimachinery/pkg/util/clock/clock.go | 90 +- .../k8s.io/apimachinery/pkg/util/diff/diff.go | 266 +- .../apimachinery/pkg/util/errors/errors.go | 38 +- .../pkg/util/intstr/generated.pb.go | 195 +- .../pkg/util/intstr/generated.proto | 2 +- .../apimachinery/pkg/util/intstr/intstr.go | 10 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 28 +- .../apimachinery/pkg/util/mergepatch/OWNERS | 2 + .../apimachinery/pkg/util/mergepatch/util.go | 2 +- .../pkg/util/naming/from_stack.go | 2 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 55 +- .../apimachinery/pkg/util/net/interface.go | 111 +- .../k8s.io/apimachinery/pkg/util/net/util.go | 17 + .../apimachinery/pkg/util/runtime/runtime.go | 52 +- .../k8s.io/apimachinery/pkg/util/sets/byte.go | 6 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 6 +- .../apimachinery/pkg/util/sets/int32.go | 205 + .../apimachinery/pkg/util/sets/int64.go | 6 +- .../apimachinery/pkg/util/sets/string.go | 6 +- .../pkg/util/strategicpatch/OWNERS | 2 + .../pkg/util/strategicpatch/patch.go | 2 +- .../k8s.io/apimachinery/pkg/util/uuid/uuid.go | 20 +- .../pkg/util/validation/field/errors.go | 15 +- .../pkg/util/validation/validation.go | 59 +- .../pkg/util/version/doc.go | 2 +- .../pkg/util/version/version.go | 37 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 183 +- .../apimachinery/pkg/util/yaml/decoder.go | 8 +- vendor/k8s.io/apimachinery/pkg/version/doc.go | 3 +- .../apimachinery/pkg/watch/streamwatcher.go | 33 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 11 +- .../third_party/forked/golang/json/OWNERS | 2 + .../client-go/discovery/cached_discovery.go | 282 - .../client-go/discovery/discovery_client.go | 127 +- vendor/k8s.io/client-go/discovery/doc.go | 2 +- vendor/k8s.io/client-go/discovery/helper.go | 8 +- .../client-go/discovery/round_tripper.go | 62 - .../admissionregistration/interface.go | 12 +- .../admissionregistration/v1/interface.go | 52 + .../v1/mutatingwebhookconfiguration.go | 88 + .../v1/validatingwebhookconfiguration.go | 88 + .../v1alpha1/initializerconfiguration.go | 88 - .../informers/auditregistration/interface.go | 46 + .../auditregistration/v1alpha1/auditsink.go | 88 + .../auditregistration/v1alpha1/interface.go | 45 + .../informers/coordination/interface.go | 8 + .../informers/coordination/v1/interface.go | 45 + .../informers/coordination/v1/lease.go | 89 + .../informers/discovery/interface.go | 54 + .../discovery/v1alpha1/endpointslice.go | 89 + .../informers/discovery/v1alpha1/interface.go | 45 + .../discovery/v1beta1/endpointslice.go | 89 + .../informers/discovery/v1beta1/interface.go | 45 + .../informers/extensions/v1beta1/interface.go | 7 + .../extensions/v1beta1/networkpolicy.go | 89 + vendor/k8s.io/client-go/informers/factory.go | 
24 + .../informers/flowcontrol/interface.go | 46 + .../flowcontrol/v1alpha1/flowschema.go | 88 + .../flowcontrol/v1alpha1/interface.go | 52 + .../v1alpha1/prioritylevelconfiguration.go | 88 + vendor/k8s.io/client-go/informers/generic.go | 79 +- .../internalinterfaces/factory_interfaces.go | 2 + .../informers/networking/interface.go | 8 + .../informers/networking/v1beta1/ingress.go | 89 + .../informers/networking/v1beta1/interface.go | 45 + .../client-go/informers/node/interface.go | 54 + .../v1alpha1/interface.go | 10 +- .../informers/node/v1alpha1/runtimeclass.go | 88 + .../informers/node/v1beta1/interface.go | 45 + .../informers/node/v1beta1/runtimeclass.go | 88 + .../informers/scheduling/interface.go | 8 + .../informers/scheduling/v1/interface.go | 45 + .../informers/scheduling/v1/priorityclass.go | 88 + .../client-go/informers/storage/v1/csinode.go | 88 + .../informers/storage/v1/interface.go | 14 + .../informers/storage/v1/volumeattachment.go | 88 + .../informers/storage/v1beta1/csidriver.go | 88 + .../informers/storage/v1beta1/csinode.go | 88 + .../informers/storage/v1beta1/interface.go | 14 + .../k8s.io/client-go/kubernetes/clientset.go | 341 +- .../client-go/kubernetes/scheme/register.go | 24 +- .../v1/admissionregistration_client.go | 94 + .../typed/admissionregistration/v1}/doc.go | 12 +- .../v1/generated_expansion.go | 23 + .../v1/mutatingwebhookconfiguration.go | 164 + .../v1/validatingwebhookconfiguration.go | 164 + .../v1alpha1/initializerconfiguration.go | 147 - .../v1beta1/admissionregistration_client.go | 3 +- .../v1beta1/mutatingwebhookconfiguration.go | 17 + .../v1beta1/validatingwebhookconfiguration.go | 17 + .../kubernetes/typed/apps/v1/apps_client.go | 3 +- .../typed/apps/v1/controllerrevision.go | 17 + .../kubernetes/typed/apps/v1/daemonset.go | 17 + .../kubernetes/typed/apps/v1/deployment.go | 49 + .../kubernetes/typed/apps/v1/replicaset.go | 49 + .../kubernetes/typed/apps/v1/statefulset.go | 49 + .../typed/apps/v1beta1/apps_client.go | 8 +- .../typed/apps/v1beta1/controllerrevision.go | 17 + .../typed/apps/v1beta1/deployment.go | 17 + .../typed/apps/v1beta1/generated_expansion.go | 2 - .../kubernetes/typed/apps/v1beta1/scale.go | 48 - .../typed/apps/v1beta1/statefulset.go | 17 + .../typed/apps/v1beta2/apps_client.go | 8 +- .../typed/apps/v1beta2/controllerrevision.go | 17 + .../typed/apps/v1beta2/daemonset.go | 17 + .../typed/apps/v1beta2/deployment.go | 17 + .../typed/apps/v1beta2/generated_expansion.go | 2 - .../typed/apps/v1beta2/replicaset.go | 17 + .../kubernetes/typed/apps/v1beta2/scale.go | 48 - .../typed/apps/v1beta2/statefulset.go | 17 + .../v1alpha1/auditregistration_client.go} | 35 +- .../auditregistration/v1alpha1/auditsink.go | 164 + .../v1alpha1/doc.go | 0 .../v1alpha1/generated_expansion.go | 2 +- .../v1/authentication_client.go | 3 +- .../v1/tokenreview_expansion.go | 8 + .../v1beta1/authentication_client.go | 3 +- .../v1beta1/tokenreview_expansion.go | 8 + .../authorization/v1/authorization_client.go | 3 +- .../v1/localsubjectaccessreview_expansion.go | 8 + .../v1/selfsubjectaccessreview_expansion.go | 8 + .../v1/selfsubjectrulesreview_expansion.go | 8 + .../v1/subjectaccessreview_expansion.go | 8 + .../v1beta1/authorization_client.go | 3 +- .../localsubjectaccessreview_expansion.go | 8 + .../selfsubjectaccessreview_expansion.go | 8 + .../selfsubjectrulesreview_expansion.go | 8 + .../v1beta1/subjectaccessreview_expansion.go | 8 + .../autoscaling/v1/autoscaling_client.go | 3 +- .../autoscaling/v1/horizontalpodautoscaler.go | 17 + 
.../autoscaling/v2beta1/autoscaling_client.go | 3 +- .../v2beta1/horizontalpodautoscaler.go | 17 + .../autoscaling/v2beta2/autoscaling_client.go | 3 +- .../v2beta2/horizontalpodautoscaler.go | 17 + .../kubernetes/typed/batch/v1/batch_client.go | 3 +- .../kubernetes/typed/batch/v1/job.go | 17 + .../typed/batch/v1beta1/batch_client.go | 3 +- .../kubernetes/typed/batch/v1beta1/cronjob.go | 17 + .../typed/batch/v2alpha1/batch_client.go | 3 +- .../typed/batch/v2alpha1/cronjob.go | 17 + .../v1beta1/certificates_client.go | 3 +- .../v1beta1/certificatesigningrequest.go | 17 + .../coordination/v1/coordination_client.go | 89 + .../kubernetes/typed/coordination/v1/doc.go | 20 + .../coordination/v1/generated_expansion.go | 21 + .../kubernetes/typed/coordination/v1/lease.go | 174 + .../v1beta1/coordination_client.go | 3 +- .../typed/coordination/v1beta1/lease.go | 17 + .../typed/core/v1/componentstatus.go | 17 + .../kubernetes/typed/core/v1/configmap.go | 17 + .../kubernetes/typed/core/v1/core_client.go | 3 +- .../kubernetes/typed/core/v1/endpoints.go | 17 + .../kubernetes/typed/core/v1/event.go | 17 + .../typed/core/v1/event_expansion.go | 10 +- .../kubernetes/typed/core/v1/limitrange.go | 17 + .../kubernetes/typed/core/v1/namespace.go | 12 + .../kubernetes/typed/core/v1/node.go | 17 + .../typed/core/v1/persistentvolume.go | 17 + .../typed/core/v1/persistentvolumeclaim.go | 17 + .../client-go/kubernetes/typed/core/v1/pod.go | 48 + .../kubernetes/typed/core/v1/podtemplate.go | 17 + .../typed/core/v1/replicationcontroller.go | 33 +- .../kubernetes/typed/core/v1/resourcequota.go | 17 + .../kubernetes/typed/core/v1/secret.go | 17 + .../kubernetes/typed/core/v1/service.go | 12 + .../typed/core/v1/serviceaccount.go | 17 + .../discovery/v1alpha1/discovery_client.go | 89 + .../typed/discovery/v1alpha1/doc.go | 20 + .../typed/discovery/v1alpha1/endpointslice.go | 174 + .../discovery/v1alpha1/generated_expansion.go | 21 + .../discovery/v1beta1/discovery_client.go | 89 + .../kubernetes/typed/discovery/v1beta1/doc.go | 20 + .../typed/discovery/v1beta1/endpointslice.go | 174 + .../discovery/v1beta1/generated_expansion.go | 21 + .../kubernetes/typed/events/v1beta1/event.go | 17 + .../typed/events/v1beta1/event_expansion.go | 98 + .../typed/events/v1beta1/events_client.go | 3 +- .../events/v1beta1/generated_expansion.go | 2 - .../typed/extensions/v1beta1/daemonset.go | 17 + .../typed/extensions/v1beta1/deployment.go | 17 + .../extensions/v1beta1/extensions_client.go | 13 +- .../extensions/v1beta1/generated_expansion.go | 2 + .../typed/extensions/v1beta1/ingress.go | 17 + .../typed/extensions/v1beta1/networkpolicy.go | 174 + .../extensions/v1beta1/podsecuritypolicy.go | 17 + .../typed/extensions/v1beta1/replicaset.go | 17 + .../typed/extensions/v1beta1/scale.go | 48 - .../extensions/v1beta1/scale_expansion.go | 65 - .../typed/flowcontrol/v1alpha1/doc.go | 20 + .../v1alpha1/flowcontrol_client.go | 94 + .../typed/flowcontrol/v1alpha1/flowschema.go | 180 + .../v1alpha1/generated_expansion.go | 23 + .../v1alpha1/prioritylevelconfiguration.go | 180 + .../typed/networking/v1/networking_client.go | 3 +- .../typed/networking/v1/networkpolicy.go | 17 + .../typed/networking/v1beta1/doc.go | 20 + .../networking/v1beta1/generated_expansion.go | 21 + .../typed/networking/v1beta1/ingress.go | 191 + .../networking/v1beta1/networking_client.go | 89 + .../kubernetes/typed/node/v1alpha1/doc.go | 20 + .../node/v1alpha1/generated_expansion.go | 21 + .../typed/node/v1alpha1/node_client.go | 89 + .../typed/node/v1alpha1/runtimeclass.go | 
164 + .../kubernetes/typed/node/v1beta1/doc.go | 20 + .../typed/node/v1beta1/generated_expansion.go | 21 + .../typed/node/v1beta1/node_client.go | 89 + .../typed/node/v1beta1/runtimeclass.go | 164 + .../policy/v1beta1/poddisruptionbudget.go | 17 + .../typed/policy/v1beta1/podsecuritypolicy.go | 17 + .../typed/policy/v1beta1/policy_client.go | 3 +- .../kubernetes/typed/rbac/v1/clusterrole.go | 17 + .../typed/rbac/v1/clusterrolebinding.go | 17 + .../kubernetes/typed/rbac/v1/rbac_client.go | 3 +- .../kubernetes/typed/rbac/v1/role.go | 17 + .../kubernetes/typed/rbac/v1/rolebinding.go | 17 + .../typed/rbac/v1alpha1/clusterrole.go | 17 + .../typed/rbac/v1alpha1/clusterrolebinding.go | 17 + .../typed/rbac/v1alpha1/rbac_client.go | 3 +- .../kubernetes/typed/rbac/v1alpha1/role.go | 17 + .../typed/rbac/v1alpha1/rolebinding.go | 17 + .../typed/rbac/v1beta1/clusterrole.go | 17 + .../typed/rbac/v1beta1/clusterrolebinding.go | 17 + .../typed/rbac/v1beta1/rbac_client.go | 3 +- .../kubernetes/typed/rbac/v1beta1/role.go | 17 + .../typed/rbac/v1beta1/rolebinding.go | 17 + .../kubernetes/typed/scheduling/v1/doc.go | 20 + .../scheduling/v1/generated_expansion.go | 21 + .../typed/scheduling/v1/priorityclass.go | 164 + .../typed/scheduling/v1/scheduling_client.go | 89 + .../scheduling/v1alpha1/priorityclass.go | 17 + .../scheduling/v1alpha1/scheduling_client.go | 3 +- .../typed/scheduling/v1beta1/priorityclass.go | 17 + .../scheduling/v1beta1/scheduling_client.go | 3 +- .../typed/settings/v1alpha1/podpreset.go | 17 + .../settings/v1alpha1/settings_client.go | 3 +- .../kubernetes/typed/storage/v1/csinode.go | 164 + .../typed/storage/v1/generated_expansion.go | 4 + .../typed/storage/v1/storage_client.go | 13 +- .../typed/storage/v1/storageclass.go | 17 + .../typed/storage/v1/volumeattachment.go | 180 + .../typed/storage/v1alpha1/storage_client.go | 3 +- .../storage/v1alpha1/volumeattachment.go | 17 + .../typed/storage/v1beta1/csidriver.go | 164 + .../typed/storage/v1beta1/csinode.go | 164 + .../storage/v1beta1/generated_expansion.go | 4 + .../typed/storage/v1beta1/storage_client.go | 13 +- .../typed/storage/v1beta1/storageclass.go | 17 + .../typed/storage/v1beta1/volumeattachment.go | 17 + .../v1/expansion_generated.go | 27 + .../v1/mutatingwebhookconfiguration.go | 65 + .../v1/validatingwebhookconfiguration.go | 65 + .../v1alpha1/initializerconfiguration.go | 65 - .../apps/v1beta1/expansion_generated.go | 8 - .../client-go/listers/apps/v1beta1/scale.go | 94 - .../apps/v1beta2/expansion_generated.go | 8 - .../client-go/listers/apps/v1beta2/scale.go | 94 - .../auditregistration/v1alpha1/auditsink.go | 65 + .../v1alpha1/expansion_generated.go | 6 +- .../coordination/v1/expansion_generated.go | 27 + .../listers/coordination/v1/lease.go | 94 + .../discovery/v1alpha1/endpointslice.go | 94 + .../discovery/v1alpha1/expansion_generated.go | 27 + .../discovery/v1beta1/endpointslice.go | 94 + .../discovery/v1beta1/expansion_generated.go | 27 + .../extensions/v1beta1/expansion_generated.go | 16 +- .../extensions/v1beta1/networkpolicy.go | 94 + .../listers/extensions/v1beta1/scale.go | 94 - .../v1alpha1/expansion_generated.go | 27 + .../flowcontrol/v1alpha1/flowschema.go | 65 + .../v1alpha1/prioritylevelconfiguration.go | 65 + .../networking/v1beta1/expansion_generated.go | 27 + .../listers/networking/v1beta1/ingress.go | 94 + .../node/v1alpha1/expansion_generated.go | 23 + .../listers/node/v1alpha1/runtimeclass.go | 65 + .../node/v1beta1/expansion_generated.go | 23 + .../listers/node/v1beta1/runtimeclass.go | 65 + 
.../v1beta1/poddisruptionbudget_expansion.go | 4 +- .../scheduling/v1/expansion_generated.go | 23 + .../listers/scheduling/v1/priorityclass.go | 65 + .../client-go/listers/storage/v1/csinode.go | 65 + .../listers/storage/v1/expansion_generated.go | 8 + .../listers/storage/v1/volumeattachment.go | 65 + .../listers/storage/v1beta1/csidriver.go | 65 + .../listers/storage/v1beta1/csinode.go | 65 + .../storage/v1beta1/expansion_generated.go | 8 + .../pkg/apis/clientauthentication/OWNERS | 9 + .../pkg/apis/clientauthentication/doc.go | 1 + .../apis/clientauthentication/v1alpha1/doc.go | 1 + .../clientauthentication/v1alpha1/types.go | 2 +- .../apis/clientauthentication/v1beta1/doc.go | 1 + .../v1beta1/zz_generated.conversion.go | 5 - vendor/k8s.io/client-go/pkg/version/def.bzl | 2 +- vendor/k8s.io/client-go/pkg/version/doc.go | 3 +- .../plugin/pkg/client/auth/exec/exec.go | 38 +- vendor/k8s.io/client-go/rest/OWNERS | 3 +- vendor/k8s.io/client-go/rest/client.go | 141 +- vendor/k8s.io/client-go/rest/config.go | 229 +- vendor/k8s.io/client-go/rest/plugin.go | 8 +- vendor/k8s.io/client-go/rest/request.go | 309 +- vendor/k8s.io/client-go/rest/transport.go | 32 +- vendor/k8s.io/client-go/rest/urlbackoff.go | 8 +- vendor/k8s.io/client-go/rest/watch/decoder.go | 2 +- .../client-go/rest/zz_generated.deepcopy.go | 5 + vendor/k8s.io/client-go/tools/cache/OWNERS | 9 +- .../client-go/tools/cache/controller.go | 77 +- .../client-go/tools/cache/delta_fifo.go | 64 +- .../client-go/tools/cache/expiration_cache.go | 67 +- .../tools/cache/expiration_cache_fakes.go | 5 +- .../tools/cache/fake_custom_store.go | 6 +- vendor/k8s.io/client-go/tools/cache/fifo.go | 13 +- vendor/k8s.io/client-go/tools/cache/heap.go | 12 +- vendor/k8s.io/client-go/tools/cache/index.go | 29 +- .../k8s.io/client-go/tools/cache/listers.go | 27 +- .../k8s.io/client-go/tools/cache/listwatch.go | 14 +- .../client-go/tools/cache/mutation_cache.go | 5 +- .../tools/cache/mutation_detector.go | 32 +- .../k8s.io/client-go/tools/cache/reflector.go | 206 +- .../tools/cache/reflector_metrics.go | 30 - .../client-go/tools/cache/shared_informer.go | 162 +- vendor/k8s.io/client-go/tools/cache/store.go | 2 +- .../tools/cache/thread_safe_store.go | 29 +- .../client-go/tools/cache/undelta_store.go | 8 +- .../client-go/tools/clientcmd/api/doc.go | 1 + .../client-go/tools/clientcmd/api/types.go | 49 + .../client-go/tools/leaderelection/OWNERS | 3 +- .../tools/leaderelection/healthzadaptor.go | 69 + .../tools/leaderelection/leaderelection.go | 152 +- .../client-go/tools/leaderelection/metrics.go | 109 + .../resourcelock/configmaplock.go | 18 +- .../resourcelock/endpointslock.go | 19 +- .../leaderelection/resourcelock/interface.go | 79 +- .../leaderelection/resourcelock/leaselock.go | 129 + .../leaderelection/resourcelock/multilock.go | 103 + vendor/k8s.io/client-go/tools/metrics/OWNERS | 4 +- vendor/k8s.io/client-go/tools/pager/pager.go | 129 +- vendor/k8s.io/client-go/tools/record/OWNERS | 2 + vendor/k8s.io/client-go/tools/record/doc.go | 3 +- vendor/k8s.io/client-go/tools/record/event.go | 150 +- .../client-go/tools/record/events_cache.go | 46 + .../client-go/tools/record/util/util.go | 44 + .../k8s.io/client-go/tools/reference/ref.go | 53 +- vendor/k8s.io/client-go/transport/OWNERS | 2 + vendor/k8s.io/client-go/transport/cache.go | 36 +- vendor/k8s.io/client-go/transport/config.go | 30 +- .../client-go/transport/round_trippers.go | 134 +- .../{rest => transport}/token_source.go | 40 +- .../k8s.io/client-go/transport/transport.go | 78 +- 
vendor/k8s.io/client-go/util/cert/OWNERS | 9 + vendor/k8s.io/client-go/util/cert/cert.go | 106 +- vendor/k8s.io/client-go/util/cert/io.go | 104 +- vendor/k8s.io/client-go/util/cert/pem.go | 212 +- .../client-go/util/cert/server_inspection.go | 102 + .../client-go/util/flowcontrol/backoff.go | 18 +- .../client-go/util/flowcontrol/throttle.go | 16 + vendor/k8s.io/client-go/util/keyutil/OWNERS | 7 + vendor/k8s.io/client-go/util/keyutil/key.go | 323 + vendor/k8s.io/client-go/util/retry/OWNERS | 2 + vendor/k8s.io/client-go/util/retry/util.go | 68 +- .../util/workqueue/default_rate_limiters.go | 4 +- .../util/workqueue/delaying_queue.go | 32 +- vendor/k8s.io/client-go/util/workqueue/doc.go | 2 +- .../client-go/util/workqueue/metrics.go | 135 +- .../client-go/util/workqueue/parallelizer.go | 6 - .../k8s.io/client-go/util/workqueue/queue.go | 50 +- ...itting_queue.go => rate_limiting_queue.go} | 6 +- vendor/k8s.io/klog/.travis.yml | 16 + vendor/k8s.io/klog/CONTRIBUTING.md | 22 + .../golang/glog => k8s.io/klog}/LICENSE | 0 vendor/k8s.io/klog/OWNERS | 19 + vendor/k8s.io/klog/README.md | 97 + vendor/k8s.io/klog/RELEASE.md | 9 + vendor/k8s.io/klog/SECURITY_CONTACTS | 20 + vendor/k8s.io/klog/code-of-conduct.md | 3 + vendor/k8s.io/klog/go.mod | 5 + vendor/k8s.io/klog/go.sum | 2 + .../glog/glog.go => k8s.io/klog/klog.go} | 258 +- .../glog_file.go => k8s.io/klog/klog_file.go} | 35 +- vendor/k8s.io/kubernetes/LICENSE | 202 - vendor/k8s.io/kubernetes/pkg/apis/core/BUILD | 60 - vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS | 45 - .../pkg/apis/core/annotation_key_constants.go | 85 - .../pkg/apis/core/field_constants.go | 38 - .../kubernetes/pkg/apis/core/helper/BUILD | 51 - .../pkg/apis/core/helper/helpers.go | 564 - .../k8s.io/kubernetes/pkg/apis/core/json.go | 28 - .../pkg/apis/core/objectreference.go | 34 - .../kubernetes/pkg/apis/core/resource.go | 55 - .../k8s.io/kubernetes/pkg/apis/core/taint.go | 36 - .../kubernetes/pkg/apis/core/toleration.go | 30 - .../k8s.io/kubernetes/pkg/apis/core/types.go | 4696 -- .../kubernetes/pkg/apis/core/v1/helper/BUILD | 52 - .../pkg/apis/core/v1/helper/helpers.go | 502 - .../pkg/apis/core/zz_generated.deepcopy.go | 5389 -- .../k8s.io/kubernetes/pkg/util/version/BUILD | 35 - .../google/btree => k8s.io/utils}/LICENSE | 0 .../util => utils}/buffer/ring_growing.go | 0 .../util => utils}/integer/integer.go | 6 + vendor/k8s.io/utils/trace/trace.go | 131 + vendor/modules.txt | 346 +- .../LICENSE | 0 .../controller/controller.go | 715 +- .../controller/doc.go | 17 + .../controller/metrics/doc.go | 17 + .../controller/metrics/metrics.go | 0 .../controller/volume.go | 64 +- .../controller/volume_store.go | 269 + .../util/doc.go | 17 + .../util/util.go | 159 + .../ghodss => sigs.k8s.io}/yaml/.gitignore | 0 vendor/sigs.k8s.io/yaml/.travis.yml | 14 + vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 + .../ghodss => sigs.k8s.io}/yaml/LICENSE | 0 vendor/sigs.k8s.io/yaml/OWNERS | 25 + .../ghodss => sigs.k8s.io}/yaml/README.md | 0 vendor/sigs.k8s.io/yaml/RELEASE.md | 9 + vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 + vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 + .../ghodss => sigs.k8s.io}/yaml/fields.go | 1 + .../ghodss => sigs.k8s.io}/yaml/yaml.go | 68 +- vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 + 1149 files changed, 188498 insertions(+), 70876 deletions(-) delete mode 100644 vendor/github.com/ghodss/yaml/.travis.yml delete mode 100644 vendor/github.com/golang/glog/README delete mode 100644 vendor/github.com/google/btree/.travis.yml delete mode 100644 
vendor/github.com/google/btree/README.md delete mode 100644 vendor/github.com/google/btree/btree.go delete mode 100644 vendor/github.com/google/btree/btree_mem.go rename vendor/github.com/{pborman/uuid => google/go-cmp}/LICENSE (96%) create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go create mode 100644 vendor/github.com/google/uuid/go.mod delete mode 100644 vendor/github.com/gregjones/httpcache/.travis.yml delete mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt delete mode 100644 vendor/github.com/gregjones/httpcache/README.md delete mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go delete mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go delete mode 100644 vendor/github.com/kubernetes-incubator/external-storage/lib/util/util.go create mode 100644 vendor/github.com/miekg/dns/.codecov.yml create mode 100644 vendor/github.com/miekg/dns/.gitignore create mode 100644 vendor/github.com/miekg/dns/.travis.yml create mode 100644 vendor/github.com/miekg/dns/AUTHORS create mode 100644 vendor/github.com/miekg/dns/CODEOWNERS create mode 100644 vendor/github.com/miekg/dns/CONTRIBUTORS create mode 100644 vendor/github.com/miekg/dns/COPYRIGHT create mode 100644 vendor/github.com/miekg/dns/LICENSE create mode 100644 vendor/github.com/miekg/dns/Makefile.fuzz create mode 100644 vendor/github.com/miekg/dns/Makefile.release create mode 100644 vendor/github.com/miekg/dns/README.md create mode 100644 vendor/github.com/miekg/dns/acceptfunc.go create mode 100644 vendor/github.com/miekg/dns/client.go create mode 100644 vendor/github.com/miekg/dns/clientconfig.go create mode 100644 vendor/github.com/miekg/dns/dane.go create mode 100644 vendor/github.com/miekg/dns/defaults.go create mode 100644 vendor/github.com/miekg/dns/dns.go create mode 100644 vendor/github.com/miekg/dns/dnssec.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go create mode 100644 
vendor/github.com/miekg/dns/dnssec_keyscan.go create mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go create mode 100644 vendor/github.com/miekg/dns/doc.go create mode 100644 vendor/github.com/miekg/dns/duplicate.go create mode 100644 vendor/github.com/miekg/dns/edns.go create mode 100644 vendor/github.com/miekg/dns/format.go create mode 100644 vendor/github.com/miekg/dns/fuzz.go create mode 100644 vendor/github.com/miekg/dns/generate.go create mode 100644 vendor/github.com/miekg/dns/go.mod create mode 100644 vendor/github.com/miekg/dns/go.sum create mode 100644 vendor/github.com/miekg/dns/labels.go create mode 100644 vendor/github.com/miekg/dns/listen_go111.go create mode 100644 vendor/github.com/miekg/dns/listen_go_not111.go create mode 100644 vendor/github.com/miekg/dns/msg.go create mode 100644 vendor/github.com/miekg/dns/msg_helpers.go create mode 100644 vendor/github.com/miekg/dns/msg_truncate.go create mode 100644 vendor/github.com/miekg/dns/nsecx.go create mode 100644 vendor/github.com/miekg/dns/privaterr.go create mode 100644 vendor/github.com/miekg/dns/reverse.go create mode 100644 vendor/github.com/miekg/dns/sanitize.go create mode 100644 vendor/github.com/miekg/dns/scan.go create mode 100644 vendor/github.com/miekg/dns/scan_rr.go create mode 100644 vendor/github.com/miekg/dns/serve_mux.go create mode 100644 vendor/github.com/miekg/dns/server.go create mode 100644 vendor/github.com/miekg/dns/sig0.go create mode 100644 vendor/github.com/miekg/dns/singleinflight.go create mode 100644 vendor/github.com/miekg/dns/smimea.go create mode 100644 vendor/github.com/miekg/dns/tlsa.go create mode 100644 vendor/github.com/miekg/dns/tsig.go create mode 100644 vendor/github.com/miekg/dns/types.go create mode 100644 vendor/github.com/miekg/dns/udp.go create mode 100644 vendor/github.com/miekg/dns/udp_windows.go create mode 100644 vendor/github.com/miekg/dns/update.go create mode 100644 vendor/github.com/miekg/dns/version.go create mode 100644 vendor/github.com/miekg/dns/xfr.go create mode 100644 vendor/github.com/miekg/dns/zduplicate.go create mode 100644 vendor/github.com/miekg/dns/zmsg.go create mode 100644 vendor/github.com/miekg/dns/ztypes.go delete mode 100644 vendor/github.com/pborman/uuid/.travis.yml delete mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/pborman/uuid/README.md delete mode 100644 vendor/github.com/pborman/uuid/dce.go delete mode 100644 vendor/github.com/pborman/uuid/doc.go delete mode 100644 vendor/github.com/pborman/uuid/go.mod delete mode 100644 vendor/github.com/pborman/uuid/go.sum delete mode 100644 vendor/github.com/pborman/uuid/hash.go delete mode 100644 vendor/github.com/pborman/uuid/marshal.go delete mode 100644 vendor/github.com/pborman/uuid/node.go delete mode 100644 vendor/github.com/pborman/uuid/sql.go delete mode 100644 vendor/github.com/pborman/uuid/time.go delete mode 100644 vendor/github.com/pborman/uuid/util.go delete mode 100644 vendor/github.com/pborman/uuid/uuid.go delete mode 100644 vendor/github.com/pborman/uuid/version1.go delete mode 100644 vendor/github.com/pborman/uuid/version4.go delete mode 100644 vendor/github.com/peterbourgon/diskv/LICENSE delete mode 100644 vendor/github.com/peterbourgon/diskv/README.md delete mode 100644 vendor/github.com/peterbourgon/diskv/compression.go delete mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go delete mode 100644 vendor/github.com/peterbourgon/diskv/index.go create mode 100644 
vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go create mode 100644 vendor/golang.org/x/net/bpf/asm.go create mode 100644 vendor/golang.org/x/net/bpf/constants.go create mode 100644 vendor/golang.org/x/net/bpf/doc.go create mode 100644 vendor/golang.org/x/net/bpf/instructions.go create mode 100644 vendor/golang.org/x/net/bpf/setter.go create mode 100644 vendor/golang.org/x/net/bpf/vm.go create mode 100644 vendor/golang.org/x/net/bpf/vm_instructions.go create mode 100644 vendor/golang.org/x/net/internal/iana/const.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/empty.s create mode 100644 vendor/golang.org/x/net/internal/socket/error_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/error_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_msg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_const_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linkname.go 
create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_netbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_posix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go create mode 100644 
vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/batch.go create mode 100644 vendor/golang.org/x/net/ipv4/control.go create mode 100644 vendor/golang.org/x/net/ipv4/control_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/control_pktinfo.go create mode 100644 vendor/golang.org/x/net/ipv4/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv4/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv4/doc.go create mode 100644 vendor/golang.org/x/net/ipv4/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv4/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv4/header.go create mode 100644 vendor/golang.org/x/net/ipv4/helper.go create mode 100644 vendor/golang.org/x/net/ipv4/iana.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/packet.go create mode 100644 vendor/golang.org/x/net/ipv4/payload.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_aix.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go 
create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/batch.go create mode 100644 vendor/golang.org/x/net/ipv6/control.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv6/doc.go create mode 100644 vendor/golang.org/x/net/ipv6/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv6/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv6/header.go create mode 100644 vendor/golang.org/x/net/ipv6/helper.go create mode 100644 vendor/golang.org/x/net/ipv6/iana.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/payload.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_aix.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_stub.go create mode 100644 
vendor/golang.org/x/net/ipv6/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go rename vendor/k8s.io/api/admissionregistration/{v1alpha1 => v1}/doc.go (75%) create mode 100644 vendor/k8s.io/api/admissionregistration/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/generated.proto rename vendor/k8s.io/api/admissionregistration/{v1alpha1 => v1}/register.go (87%) create mode 100644 vendor/k8s.io/api/admissionregistration/v1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go 
delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/coordination/v1/doc.go create mode 100644 vendor/k8s.io/api/coordination/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/coordination/v1/generated.proto create mode 100644 vendor/k8s.io/api/coordination/v1/register.go create mode 100644 vendor/k8s.io/api/coordination/v1/types.go create mode 100644 vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/core/v1/well_known_labels.go create mode 100644 vendor/k8s.io/api/core/v1/well_known_taints.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/generated.proto rename vendor/k8s.io/{kubernetes/pkg/apis/core => api/discovery/v1alpha1}/register.go (52%) create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go create mode 100644 vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/discovery/v1beta1/register.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/types.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/networking/v1beta1/register.go create mode 100644 
vendor/k8s.io/api/networking/v1beta1/types.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/node/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1beta1/register.go create mode 100644 vendor/k8s.io/api/node/v1beta1/types.go create mode 100644 vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go rename vendor/k8s.io/api/{admissionregistration/v1alpha1 => node/v1beta1}/zz_generated.deepcopy.go (59%) create mode 100644 vendor/k8s.io/api/scheduling/v1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/cache.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go rename vendor/k8s.io/{kubernetes => apimachinery}/pkg/util/version/doc.go (90%) rename vendor/k8s.io/{kubernetes => apimachinery}/pkg/util/version/version.go (87%) delete mode 100644 vendor/k8s.io/client-go/discovery/cached_discovery.go delete mode 100644 vendor/k8s.io/client-go/discovery/round_tripper.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/auditregistration/interface.go create mode 100644 vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go create mode 100644 vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/coordination/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/coordination/v1/lease.go create mode 100644 
vendor/k8s.io/client-go/informers/discovery/interface.go create mode 100644 vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/interface.go create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/node/interface.go rename vendor/k8s.io/client-go/informers/{admissionregistration => node}/v1alpha1/interface.go (78%) create mode 100644 vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/informers/node/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1/csinode.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go rename vendor/k8s.io/{kubernetes/pkg/apis/core => client-go/kubernetes/typed/admissionregistration/v1}/doc.go (52%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go rename vendor/k8s.io/client-go/kubernetes/typed/{admissionregistration/v1alpha1/admissionregistration_client.go => auditregistration/v1alpha1/auditregistration_client.go} (54%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go rename vendor/k8s.io/client-go/kubernetes/typed/{admissionregistration => auditregistration}/v1alpha1/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/{admissionregistration => auditregistration}/v1alpha1/generated_expansion.go (92%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go create mode 100644 vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go rename vendor/k8s.io/client-go/listers/{admissionregistration => auditregistration}/v1alpha1/expansion_generated.go (78%) create mode 100644 vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/coordination/v1/lease.go create mode 100644 vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1/csinode.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/metrics.go create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go create mode 100644 vendor/k8s.io/client-go/tools/record/util/util.go rename vendor/k8s.io/client-go/{rest => transport}/token_source.go (70%) create mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS create mode 100644 vendor/k8s.io/client-go/util/cert/server_inspection.go create mode 100644 
vendor/k8s.io/client-go/util/keyutil/OWNERS create mode 100644 vendor/k8s.io/client-go/util/keyutil/key.go rename vendor/k8s.io/client-go/util/workqueue/{rate_limitting_queue.go => rate_limiting_queue.go} (96%) create mode 100644 vendor/k8s.io/klog/.travis.yml create mode 100644 vendor/k8s.io/klog/CONTRIBUTING.md rename vendor/{github.com/golang/glog => k8s.io/klog}/LICENSE (100%) create mode 100644 vendor/k8s.io/klog/OWNERS create mode 100644 vendor/k8s.io/klog/README.md create mode 100644 vendor/k8s.io/klog/RELEASE.md create mode 100644 vendor/k8s.io/klog/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/klog/code-of-conduct.md create mode 100644 vendor/k8s.io/klog/go.mod create mode 100644 vendor/k8s.io/klog/go.sum rename vendor/{github.com/golang/glog/glog.go => k8s.io/klog/klog.go} (83%) rename vendor/{github.com/golang/glog/glog_file.go => k8s.io/klog/klog_file.go} (75%) delete mode 100644 vendor/k8s.io/kubernetes/LICENSE delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/json.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/resource.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/taint.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/version/BUILD rename vendor/{github.com/google/btree => k8s.io/utils}/LICENSE (100%) rename vendor/k8s.io/{client-go/util => utils}/buffer/ring_growing.go (100%) rename vendor/k8s.io/{client-go/util => utils}/integer/integer.go (81%) create mode 100644 vendor/k8s.io/utils/trace/trace.go rename vendor/{github.com/kubernetes-incubator/external-storage => sigs.k8s.io/sig-storage-lib-external-provisioner}/LICENSE (100%) rename vendor/{github.com/kubernetes-incubator/external-storage/lib => sigs.k8s.io/sig-storage-lib-external-provisioner}/controller/controller.go (65%) create mode 100644 vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/doc.go create mode 100644 vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/doc.go rename vendor/{github.com/kubernetes-incubator/external-storage/lib => sigs.k8s.io/sig-storage-lib-external-provisioner}/controller/metrics/metrics.go (100%) rename vendor/{github.com/kubernetes-incubator/external-storage/lib => sigs.k8s.io/sig-storage-lib-external-provisioner}/controller/volume.go (50%) create mode 100644 vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume_store.go create mode 100644 vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/doc.go create mode 100644 vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/util.go rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/.gitignore (100%) create mode 100644 
vendor/sigs.k8s.io/yaml/.travis.yml create mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/LICENSE (100%) create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/README.md (100%) create mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md create mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS create mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/fields.go (99%) rename vendor/{github.com/ghodss => sigs.k8s.io}/yaml/yaml.go (77%) create mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go diff --git a/go.mod b/go.mod index 40984ecc..a81cbfb5 100644 --- a/go.mod +++ b/go.mod @@ -4,29 +4,18 @@ go 1.12 require ( github.com/Sirupsen/logrus v0.11.0 - github.com/ghodss/yaml v1.0.0 // indirect github.com/gogo/protobuf v1.3.0 // indirect - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect - github.com/google/btree v1.0.0 // indirect github.com/googleapis/gnostic v0.3.1 // indirect - github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/golang-lru v0.5.3 // indirect - github.com/kubernetes-incubator/external-storage v5.2.0+incompatible - github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/pborman/uuid v1.2.0 // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/miekg/dns v1.1.27 // indirect github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v1.1.0 // indirect - github.com/stretchr/testify v1.4.0 // indirect github.com/urfave/cli v1.19.1 - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect golang.org/x/sys v0.0.0-20190926180325-855e68c8590b // indirect golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/api v0.0.0-20181005203742-357ec6384fa7 - k8s.io/apimachinery v0.0.0-20180913025736-6dd46049f395 - k8s.io/client-go v0.0.0-20181005204318-cb4883f3dea0 - k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d // indirect - k8s.io/kubernetes v1.12.1 // indirect + k8s.io/api v0.17.1 + k8s.io/apimachinery v0.17.1 + k8s.io/client-go v0.17.1 + sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.2-0.20200115000635-36885abbb2bd+incompatible ) diff --git a/go.sum b/go.sum index 82098370..9d02d65d 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,14 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -10,10 +20,12 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -23,8 +35,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -45,38 +55,48 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieF github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= 
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -87,11 +107,11 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kubernetes-incubator/external-storage v5.2.0+incompatible h1:48P3+oDkUZ/M2wWI6f7n30525AYsexfASdDtecXv8eY= -github.com/kubernetes-incubator/external-storage v5.2.0+incompatible/go.mod h1:6qcu16qsv2BN+USk4ij7dxKahxYuolxVihb7e5zLNvg= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM= +github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -102,15 +122,15 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c h1:Hww8mOyEKTeON4bZn7FrlLismspbPc1teNRUVH7wLQ8= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c h1:eSfnfIuwhxZyULg1NNuZycJcYkjYVGYe7FczwQReM6U= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= -github.com/pborman/uuid v1.2.0/go.mod 
h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -135,9 +155,10 @@ github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURm github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff h1:VARhShG49tiji6mdRNp7JTNDtJ0FhuprF93GBQ37xGU= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -147,52 +168,92 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/urfave/cli v1.19.1 h1:0mKm4ZoB74PxYmZVua162y1dGt1qc10MyymYRBf3lb8= github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190926180325-855e68c8590b h1:/8GN4qrAmRZQXgjWZHj9z/UJI5vNqQhPtgcw02z2f+8= golang.org/x/sys v0.0.0-20190926180325-855e68c8590b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w= golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -200,32 +261,28 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20181005203742-357ec6384fa7 h1:cmeeh4O02+Rpgl6znrIkYB7Zu/bBIQoDa7IGZDa1Gz8= -k8s.io/api v0.0.0-20181005203742-357ec6384fa7/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.0.0-20190925180651-d58b53da08f5 h1:PEYuamj4laOODrvrh/KIKxihqE8kAnxFRZ6kKtrAS8c= -k8s.io/api v0.0.0-20190925180651-d58b53da08f5/go.mod h1:blPYY5r6fKug8SVOnjDtFAlzZzInCRL9NNls66SFhFI= -k8s.io/apimachinery v0.0.0-20180913025736-6dd46049f395 h1:X+c9tYTDc9Pmt+Z1YSMqmUTCYf13VYe1u+ZwzjgpK0M= -k8s.io/apimachinery v0.0.0-20180913025736-6dd46049f395/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.0.0-20190923155427-ec87dd743e08/go.mod h1:grJJH0hgilA2pYoUiJcPu2EDUal95NTq1vpxxvMLSu8= -k8s.io/apimachinery v0.0.0-20190925235427-62598f38f24e h1:LYpiLXa8/G1bL7VPm3m5For0h/hsr3ZyD4ytutxTctQ= -k8s.io/apimachinery v0.0.0-20190925235427-62598f38f24e/go.mod h1:grJJH0hgilA2pYoUiJcPu2EDUal95NTq1vpxxvMLSu8= -k8s.io/client-go v0.0.0-20181005204318-cb4883f3dea0 h1:ktzXC5eO9HXl8UsT/ttMjCrmYu0LY4uj9fDA4V12daA= -k8s.io/client-go v0.0.0-20181005204318-cb4883f3dea0/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.17.1 h1:i46MidoDOE9tvQ0TTEYggf3ka/pziP1+tHI/GFVeJao= +k8s.io/api v0.17.1/go.mod h1:zxiAc5y8Ngn4fmhWUtSxuUlkfz1ixT7j9wESokELzOg= +k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= +k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/client-go v0.17.1 h1:LbbuZ5tI7OYx4et5DfRFcJuoojvpYO0c7vps2rgJsHY= +k8s.io/client-go 
v0.17.1/go.mod h1:HZtHJSC/VuSHcETN9QA5QDZky1tXiYrkF/7t7vRpO1A= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92 h1:PgoMI/L1Nu5Vmvgm+vGheLuxKST8h6FMOqggyAFtHPc= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d h1:Xpe6sK+RY4ZgCTyZ3y273UmFmURhjtoJiwOMbQsXitY= -k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kubernetes v1.12.1 h1:UFMcMGMhx2Fe5oA3nTFCax9ygL5DaCysVcsubkOt8Mo= -k8s.io/kubernetes v1.12.1/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20190923111123-69764acb6e8e h1:BXSmdH6S3YGLlhC89DZp+sNdYSmwNeDU6Xu5ZpzGOlM= -k8s.io/utils v0.0.0-20190923111123-69764acb6e8e/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.2-0.20200115000635-36885abbb2bd+incompatible h1:npIS9Cs/L6o5TioSbOFP1PZX0+303iCLxoCEe6ixMzM= +sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.2-0.20200115000635-36885abbb2bd+incompatible/go.mod h1:qhqLyNwJC49PoUalmtzYb4s9fT8HOMBTLbTY1QoVOqI= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/main.go b/main.go index ae88dd9e..4f15bea7 100644 --- a/main.go +++ b/main.go @@ -10,9 +10,9 @@ import ( "github.com/pkg/errors" "github.com/urfave/cli" - pvController "github.com/kubernetes-incubator/external-storage/lib/controller" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + pvController "sigs.k8s.io/sig-storage-lib-external-provisioner/controller" ) var ( diff --git a/provisioner.go b/provisioner.go index 5830c66a..0b50f6f5 100644 --- a/provisioner.go +++ b/provisioner.go @@ -11,12 +11,12 @@ import ( "time" "github.com/Sirupsen/logrus" - pvController "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" k8serror "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + pvController "sigs.k8s.io/sig-storage-lib-external-provisioner/controller" ) type ActionType string @@ -161,7 +161,7 @@ func (p *LocalPathProvisioner) getRandomPathOnNode(node string) (string, error) return path, nil } -func (p *LocalPathProvisioner) Provision(opts pvController.VolumeOptions) (*v1.PersistentVolume, error) { +func (p 
*LocalPathProvisioner) Provision(opts pvController.ProvisionOptions) (*v1.PersistentVolume, error) { pvc := opts.PVC if pvc.Spec.Selector != nil { return nil, fmt.Errorf("claim.Spec.Selector is not supported") @@ -200,7 +200,7 @@ func (p *LocalPathProvisioner) Provision(opts pvController.VolumeOptions) (*v1.P Name: name, }, Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: opts.PersistentVolumeReclaimPolicy, + PersistentVolumeReclaimPolicy: *opts.StorageClass.ReclaimPolicy, AccessModes: pvc.Spec.AccessModes, VolumeMode: &fs, Capacity: v1.ResourceList{ diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc..00000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README deleted file mode 100644 index 387b4eb6..00000000 --- a/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md deleted file mode 100644 index 6062a4da..00000000 --- a/vendor/github.com/google/btree/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# BTree implementation for Go - -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - -This package provides an in-memory B-Tree implementation for Go, useful as -an ordered, mutable data structure. - -The API is based off of the wonderful -http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to -act as a drop-in replacement for gollrb trees. - -See http://godoc.org/github.com/google/btree for documentation. 
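The provisioner.go hunk above tracks the controller API move: in sigs.k8s.io/sig-storage-lib-external-provisioner the resolved StorageClass travels on ProvisionOptions instead of a top-level PersistentVolumeReclaimPolicy field, which is why the PV spec now dereferences *opts.StorageClass.ReclaimPolicy. A minimal sketch of reading that field defensively is shown below; the package name, helper name, and Delete fallback are illustrative only and are not part of this patch, which dereferences the field directly.

package localpath // hypothetical package name for this sketch

import (
	v1 "k8s.io/api/core/v1"
	pvController "sigs.k8s.io/sig-storage-lib-external-provisioner/controller"
)

// reclaimPolicyFor shows where the reclaim policy lives in the new API:
// on opts.StorageClass, as a *v1.PersistentVolumeReclaimPolicy, so both the
// class pointer and the field should be checked before dereferencing.
func reclaimPolicyFor(opts pvController.ProvisionOptions) v1.PersistentVolumeReclaimPolicy {
	if opts.StorageClass != nil && opts.StorageClass.ReclaimPolicy != nil {
		return *opts.StorageClass.ReclaimPolicy
	}
	// Illustrative fallback only: Delete is the usual default for
	// dynamically provisioned volumes.
	return v1.PersistentVolumeReclaimDelete
}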
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go deleted file mode 100644 index 6ff062f9..00000000 --- a/vendor/github.com/google/btree/btree.go +++ /dev/null @@ -1,890 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. -// It is not meant for persistent storage solutions. -// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. -// -// This implementation is designed to be a drop-in replacement to gollrb.LLRB -// trees, (http://github.com/petar/gollrb), an excellent and probably the most -// widely used ordered tree implementation in the Go ecosystem currently. -// Its functions, therefore, exactly mirror those of -// llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. -package btree - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" -) - -// Item represents a single object in the tree. -type Item interface { - // Less tests whether the current item is less than the given argument. - // - // This must provide a strict weak ordering. - // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only - // hold one of either a or b in the tree). - Less(than Item) bool -} - -const ( - DefaultFreeListSize = 32 -) - -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - -// FreeList represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. 
-type FreeList struct { - mu sync.Mutex - freelist []*node -} - -// NewFreeList creates a new free list. -// size is the maximum size of the returned free list. -func NewFreeList(size int) *FreeList { - return &FreeList{freelist: make([]*node, 0, size)} -} - -func (f *FreeList) newNode() (n *node) { - f.mu.Lock() - index := len(f.freelist) - 1 - if index < 0 { - f.mu.Unlock() - return new(node) - } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() - return -} - -// freeNode adds the given node to the list, returning true if it was added -// and false if it was discarded. -func (f *FreeList) freeNode(n *node) (out bool) { - f.mu.Lock() - if len(f.freelist) < cap(f.freelist) { - f.freelist = append(f.freelist, n) - out = true - } - f.mu.Unlock() - return -} - -// ItemIterator allows callers of Ascend* to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. -type ItemIterator func(i Item) bool - -// New creates a new B-Tree with the given degree. -// -// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -func New(degree int) *BTree { - return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) -} - -// NewWithFreeList creates a new B-Tree that uses the given node free list. -func NewWithFreeList(degree int, f *FreeList) *BTree { - if degree <= 1 { - panic("bad degree") - } - return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, - } -} - -// items stores items in a node. -type items []Item - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *items) insertAt(index int, item Item) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = item -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *items) removeAt(index int) Item { - item := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return item -} - -// pop removes and returns the last element in the list. -func (s *items) pop() (out Item) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - -// find returns the index where the given item should be inserted into this -// list. 'found' is true if the item already exists in the list at the given -// index. -func (s items) find(item Item) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return item.Less(s[i]) - }) - if i > 0 && !s[i-1].Less(item) { - return i - 1, true - } - return i, false -} - -// children stores child nodes in a node. -type children []*node - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *children) insertAt(index int, n *node) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = n -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. 
-func (s *children) removeAt(index int) *node { - n := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return n -} - -// pop removes and returns the last element in the list. -func (s *children) pop() (out *node) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - -// node is an internal node in a tree. -// -// It must at all times maintain the invariant that either -// * len(children) == 0, len(items) unconstrained -// * len(children) == len(items) + 1 -type node struct { - items items - children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c -} - -// split splits the given node at the given index. The current node shrinks, -// and this function returns the item that existed at that index and a new node -// containing all items/children after it. -func (n *node) split(i int) (Item, *node) { - item := n.items[i] - next := n.cow.newNode() - next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) - if len(n.children) > 0 { - next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) - } - return item, next -} - -// maybeSplitChild checks if a child should be split, and if so splits it. -// Returns whether or not a split occurred. -func (n *node) maybeSplitChild(i, maxItems int) bool { - if len(n.children[i].items) < maxItems { - return false - } - first := n.mutableChild(i) - item, second := first.split(maxItems / 2) - n.items.insertAt(i, item) - n.children.insertAt(i+1, second) - return true -} - -// insert inserts an item into the subtree rooted at this node, making sure -// no nodes in the subtree exceed maxItems items. Should an equivalent item be -// be found/replaced by insert, it will be returned. -func (n *node) insert(item Item, maxItems int) Item { - i, found := n.items.find(item) - if found { - out := n.items[i] - n.items[i] = item - return out - } - if len(n.children) == 0 { - n.items.insertAt(i, item) - return nil - } - if n.maybeSplitChild(i, maxItems) { - inTree := n.items[i] - switch { - case item.Less(inTree): - // no change, we want first split node - case inTree.Less(item): - i++ // we want second split node - default: - out := n.items[i] - n.items[i] = item - return out - } - } - return n.mutableChild(i).insert(item, maxItems) -} - -// get finds the given key in the subtree and returns it. 
-func (n *node) get(key Item) Item { - i, found := n.items.find(key) - if found { - return n.items[i] - } else if len(n.children) > 0 { - return n.children[i].get(key) - } - return nil -} - -// min returns the first item in the subtree. -func min(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[0] - } - if len(n.items) == 0 { - return nil - } - return n.items[0] -} - -// max returns the last item in the subtree. -func max(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[len(n.children)-1] - } - if len(n.items) == 0 { - return nil - } - return n.items[len(n.items)-1] -} - -// toRemove details what item to remove in a node.remove call. -type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node) remove(item Item, minItems int, typ toRemove) Item { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop() - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0) - } - i = 0 - case removeItem: - i, found = n.items.find(item) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i) - } - return nil - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - n.items[i] = child.remove(nil, minItems, removeMax) - return out - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. - return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. -// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. 
-func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. -func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// Used for testing/debugging purposes. 
-func (n *node) print(w io.Writer, level int) { - fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) - for _, c := range n.children { - c.print(w, level+1) - } -} - -// BTree is an implementation of a B-Tree. -// -// BTree stores Item instances in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext struct { - freelist *FreeList -} - -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - -// maxItems returns the max number of items to allow per node. -func (t *BTree) maxItems() int { - return t.degree*2 - 1 -} - -// minItems returns the min number of items to allow per node (ignored for the -// root node). -func (t *BTree) minItems() int { - return t.degree - 1 -} - -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c - return -} - -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). 
-func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - if item == nil { - panic("nil item being added to BTree") - } - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out := t.root.insert(item, t.maxItems()) - if out == nil { - t.length++ - } - return out -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - return t.deleteItem(nil, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMax() Item { - return t.deleteItem(nil, removeMax) -} - -func (t *BTree) deleteItem(item Item, typ toRemove) Item { - if t.root == nil || len(t.root.items) == 0 { - return nil - } - t.root = t.root.mutableFor(t.cow) - out := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if out != nil { - t.length-- - } - return out -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, pivot, false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, pivot, nil, true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. 
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - if t.root == nil { - return nil - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - return min(t.root) -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - return max(t.root) -} - -// Has returns true if the given key is in the tree. -func (t *BTree) Has(key Item) bool { - return t.Get(key) != nil -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. 
-func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go deleted file mode 100644 index cb95b7fa..00000000 --- a/vendor/github.com/google/btree/btree_mem.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build ignore - -// This binary compares memory usage between btree and gollrb. -package main - -import ( - "flag" - "fmt" - "math/rand" - "runtime" - "time" - - "github.com/google/btree" - "github.com/petar/GoLLRB/llrb" -) - -var ( - size = flag.Int("size", 1000000, "size of the tree to build") - degree = flag.Int("degree", 8, "degree of btree") - gollrb = flag.Bool("llrb", false, "use llrb instead of btree") -) - -func main() { - flag.Parse() - vals := rand.Perm(*size) - var t, v interface{} - v = vals - var stats runtime.MemStats - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- BEFORE ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - start := time.Now() - if *gollrb { - tr := llrb.New() - for _, v := range vals { - tr.ReplaceOrInsert(llrb.Int(v)) - } - t = tr // keep it around - } else { - tr := btree.New(*degree) - for _, v := range vals { - tr.ReplaceOrInsert(btree.Int(v)) - } - t = tr // keep it around - } - fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) - fmt.Println("-------- AFTER ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- AFTER GC ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - if t == v { - fmt.Println("to make sure vals and tree aren't GC'd") - } -} diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/google/go-cmp/LICENSE similarity index 96% rename from vendor/github.com/pborman/uuid/LICENSE rename to vendor/github.com/google/go-cmp/LICENSE index 5dc68268..32017f8f 100644 --- a/vendor/github.com/pborman/uuid/LICENSE +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. +Copyright (c) 2017 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 00000000..2133562b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,616 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared +// using the AllowUnexported option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported +// option explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. 
+// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + s := newState(opts) + s.compareAny(&pathStep{t, vx, vy}) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. +// It returns an empty string if and only if Equal returns true for the same +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + eq := Equal(x, y, Options(opts), Reporter(r)) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters map[reflect.Type]bool // Set of structs with unexported field visibility + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. 
+ s := &state{opts: Options{validator{}}} + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case visibleStructs: + if s.exporters == nil { + s.exporters = make(map[reflect.Type]bool) + } + for t := range opt { + s.exporters[t] = true + } + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. 
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy + + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. 
+ vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.mayForce = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. 
If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. 
+func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 00000000..abc3a1c3 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package cmp + +import "reflect" + +const supportAllowUnexported = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 00000000..59d4ee91 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,23 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportAllowUnexported = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { + return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 00000000..fe98dcc6 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 00000000..597b6ae5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 00000000..3d2e4266 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,372 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. 
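+//
+// For example (an illustrative note): the edit-script "..XY.M" contains six
+// edits, one UniqueX and one UniqueY, so it describes an X list of length 5
+// and a Y list of length 5, and its Dist() is 3.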
+func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. + // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. 
+ // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // • fwdFrontier.X < revFrontier.X + // • fwdFrontier.Y < revFrontier.Y + // Unless, it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // • Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). The goal of + // the search is connect with the search from the opposite corner. + // • As we search, we build a path in a greedy manner, where the first + // match seen is added to the path (this is sub-optimal, but provides a + // decent result in practice). When matches are found, we try the next pair + // of symbols in the lists and follow all matches as far as possible. + // • When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, we advance the + // frontier towards the opposite corner. + // • This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + // + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists commonly differ because elements were added to the front + // or end of the other list. + // + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + for { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. 
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 00000000..a9e7fc0b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 00000000..01aed0a1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 00000000..c0b667f5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 00000000..ace1dbe8 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. 
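+//
+// For example (an illustrative note, assuming a plain top-level function):
+// NameOf(reflect.ValueOf(strings.ToLower)) is expected to yield
+// "strings.ToLower", with any leading package path trimmed away.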
+func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 00000000..0a01c479 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 00000000..da134ae2 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 00000000..938f646f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,104 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. 
+ sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 00000000..d13a12cc --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,45 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import "reflect" + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
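+//
+// For example (illustrative only): IsZero(reflect.ValueOf([2]int{})) reports
+// true, while IsZero(reflect.ValueOf([2]int{0, 7})) reports false, since
+// arrays and structs are checked element by element and field by field.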
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 00000000..79344816 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,524 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. 
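+//
+// For example, a rough usage sketch (cmpopts.EquateEmpty comes from the
+// companion cmp/cmpopts helper package, and x, y stand for arbitrary values
+// being compared):
+//
+//	opts := Options{
+//		cmpopts.EquateEmpty(),
+//		Comparer(func(a, b time.Time) bool { return a.Equal(b) }),
+//	}
+//	Equal(x, y, opts) // the Options group is implicitly flattened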
+type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
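+//
+// For example, a sketch that only approximates float64 values when both are
+// non-zero (the 0.01 tolerance and the use of the standard math package are
+// arbitrary choices for illustration):
+//
+//	FilterValues(
+//		func(x, y float64) bool { return x != 0 && y != 0 },
+//		Comparer(func(x, y float64) bool { return math.Abs(x-y) < 0.01 }),
+//	)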
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. 
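+//
+// For example, a sketch of a transformer that compares strings
+// case-insensitively (the label "ToLower" is an arbitrary choice; the implicit
+// filter described below keeps it from being re-applied to its own output):
+//
+//	Transformer("ToLower", func(s string) string { return strings.ToLower(s) })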
+// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
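+//
+// For example, a sketch that compares time.Time values by the instant they
+// represent rather than by their internal representation:
+//
+//	Comparer(func(x, y time.Time) bool { return x.Equal(y) })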
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. 
+func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 00000000..96fffd29 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,308 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. 
+// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 00000000..6ddf2999 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 00000000..05efb992 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. 
+ TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = formatMapKey + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + // Elide struct fields that are zero value. 
+ if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueX) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return textWrap{"{", list, "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var list textList + groups := coalesceAdjacentRecords(name, recs) + for i, ds := range groups { + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. + for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. 
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 00000000..5521c604 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,279 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. + if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. 
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. + if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying 
value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.AvoidStringer = true + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 00000000..8cb3265e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. 
+ return false // Some custom option was used to determined equality + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. + var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = false + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + lastLineIdx = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. 
+ case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. + default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = (*prev).Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 00000000..80605d0e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,382 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. +type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) 
+ if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) 
+ } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name = name + "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). + switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 00000000..83031a7f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. + TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += 
child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 00000000..fc84cd79 --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go index 3e4e90dc..d651a2b0 100644 --- a/vendor/github.com/google/uuid/node.go +++ b/vendor/github.com/google/uuid/node.go @@ -48,6 +48,7 @@ func setNodeInterface(name string) bool { // does not specify a specific interface generate a random Node ID // (section 4.1.6) if name == "" { + ifname = "random" randomBits(nodeID[:]) return true } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 7f3643fe..524404cc 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All rights reserved. +// Copyright 2018 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -35,20 +35,43 @@ const ( var rander = rand.Reader // random function -// Parse decodes s into a UUID or returns an error. Both the UUID form of -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. func Parse(s string) (UUID, error) { var uuid UUID - if len(s) != 36 { - if len(s) != 36+9 { - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: if strings.ToLower(s[:9]) != "urn:uuid:" { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { return uuid, errors.New("invalid UUID format") } @@ -70,15 +93,29 @@ func Parse(s string) (UUID, error) { // ParseBytes is like Parse, except it parses a byte slice instead of a string. 
func ParseBytes(b []byte) (UUID, error) { var uuid UUID - if len(b) != 36 { - if len(b) != 36+9 { - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { return uuid, errors.New("invalid UUID format") } @@ -97,6 +134,16 @@ func ParseBytes(b []byte) (UUID, error) { return uuid, nil } +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + // FromBytes creates a new UUID from a byte slice. Returns an error if the slice // does not have a length of 16. The bytes are copied from the slice. func FromBytes(b []byte) (uuid UUID, err error) { @@ -130,7 +177,7 @@ func (uuid UUID) URN() string { } func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) + hex.Encode(dst, uuid[:4]) dst[8] = '-' hex.Encode(dst[9:13], uuid[4:6]) dst[13] = '-' diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml deleted file mode 100644 index 597bc999..00000000 --- a/vendor/github.com/gregjones/httpcache/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false -language: go -matrix: - allow_failures: - - go: master - fast_finish: true - include: - - go: 1.10.x - - go: 1.11.x - env: GOFMT=1 - - go: master -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt deleted file mode 100644 index 81316beb..00000000 --- a/vendor/github.com/gregjones/httpcache/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md deleted file mode 100644 index 51e7d23d..00000000 --- a/vendor/github.com/gregjones/httpcache/README.md +++ /dev/null @@ -1,29 +0,0 @@ -httpcache -========= - -[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) - -Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. - -It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). - -This project isn't actively maintained; it works for what I, and seemingly others, want to do with it, and I consider it "done". That said, if you find any issues, please open a Pull Request and I will try to review it. Any changes now that change the public API won't be considered. - -Cache Backends --------------- - -- The built-in 'memory' cache stores responses in an in-memory map. -- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. -- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. -- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. -- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). -- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. -- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. -- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). - -If you implement any other backend and wish it to be linked here, please send a PR editing this file. 
- -License -------- - -- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go deleted file mode 100644 index 42e3129d..00000000 --- a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package -// to supplement an in-memory map with persistent storage -// -package diskcache - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "github.com/peterbourgon/diskv" - "io" -) - -// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage -type Cache struct { - d *diskv.Diskv -} - -// Get returns the response corresponding to key if present -func (c *Cache) Get(key string) (resp []byte, ok bool) { - key = keyToFilename(key) - resp, err := c.d.Read(key) - if err != nil { - return []byte{}, false - } - return resp, true -} - -// Set saves a response to the cache as key -func (c *Cache) Set(key string, resp []byte) { - key = keyToFilename(key) - c.d.WriteStream(key, bytes.NewReader(resp), true) -} - -// Delete removes the response with key from the cache -func (c *Cache) Delete(key string) { - key = keyToFilename(key) - c.d.Erase(key) -} - -func keyToFilename(key string) string { - h := md5.New() - io.WriteString(h, key) - return hex.EncodeToString(h.Sum(nil)) -} - -// New returns a new Cache that will store files in basePath -func New(basePath string) *Cache { - return &Cache{ - d: diskv.New(diskv.Options{ - BasePath: basePath, - CacheSizeMax: 100 * 1024 * 1024, // 100MB - }), - } -} - -// NewWithDiskv returns a new Cache using the provided Diskv as underlying -// storage. -func NewWithDiskv(d *diskv.Diskv) *Cache { - return &Cache{d} -} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go deleted file mode 100644 index b41a63d1..00000000 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ /dev/null @@ -1,551 +0,0 @@ -// Package httpcache provides a http.RoundTripper implementation that works as a -// mostly RFC-compliant cache for http responses. -// -// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client -// and not for a shared proxy). -// -package httpcache - -import ( - "bufio" - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "strings" - "sync" - "time" -) - -const ( - stale = iota - fresh - transparent - // XFromCache is the header added to responses that are returned from the cache - XFromCache = "X-From-Cache" -) - -// A Cache interface is used by the Transport to store and retrieve responses. -type Cache interface { - // Get returns the []byte representation of a cached response and a bool - // set to true if the value isn't empty - Get(key string) (responseBytes []byte, ok bool) - // Set stores the []byte representation of a response against a key - Set(key string, responseBytes []byte) - // Delete removes the value associated with the key - Delete(key string) -} - -// cacheKey returns the cache key for req. -func cacheKey(req *http.Request) string { - if req.Method == http.MethodGet { - return req.URL.String() - } else { - return req.Method + " " + req.URL.String() - } -} - -// CachedResponse returns the cached http.Response for req if present, and nil -// otherwise. 
-func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { - cachedVal, ok := c.Get(cacheKey(req)) - if !ok { - return - } - - b := bytes.NewBuffer(cachedVal) - return http.ReadResponse(bufio.NewReader(b), req) -} - -// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. -type MemoryCache struct { - mu sync.RWMutex - items map[string][]byte -} - -// Get returns the []byte representation of the response and true if present, false if not -func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { - c.mu.RLock() - resp, ok = c.items[key] - c.mu.RUnlock() - return resp, ok -} - -// Set saves response resp to the cache with key -func (c *MemoryCache) Set(key string, resp []byte) { - c.mu.Lock() - c.items[key] = resp - c.mu.Unlock() -} - -// Delete removes key from the cache -func (c *MemoryCache) Delete(key string) { - c.mu.Lock() - delete(c.items, key) - c.mu.Unlock() -} - -// NewMemoryCache returns a new Cache that will store items in an in-memory map -func NewMemoryCache() *MemoryCache { - c := &MemoryCache{items: map[string][]byte{}} - return c -} - -// Transport is an implementation of http.RoundTripper that will return values from a cache -// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) -// to repeated requests allowing servers to return 304 / Not Modified -type Transport struct { - // The RoundTripper interface actually used to make requests - // If nil, http.DefaultTransport is used - Transport http.RoundTripper - Cache Cache - // If true, responses returned from the cache will be given an extra header, X-From-Cache - MarkCachedResponses bool -} - -// NewTransport returns a new Transport with the -// provided Cache implementation and MarkCachedResponses set to true -func NewTransport(c Cache) *Transport { - return &Transport{Cache: c, MarkCachedResponses: true} -} - -// Client returns an *http.Client that caches responses. -func (t *Transport) Client() *http.Client { - return &http.Client{Transport: t} -} - -// varyMatches will return false unless all of the cached values for the headers listed in Vary -// match the new request -func varyMatches(cachedResp *http.Response, req *http.Request) bool { - for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { - header = http.CanonicalHeaderKey(header) - if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { - return false - } - } - return true -} - -// RoundTrip takes a Request and returns a Response -// -// If there is a fresh Response already in cache, then it will be returned without connecting to -// the server. -// -// If there is a stale Response, then any validators it contains will be set on the new request -// to give the server a chance to respond with NotModified. If this happens, then the cached Response -// will be returned. 
-func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - cacheKey := cacheKey(req) - cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" - var cachedResp *http.Response - if cacheable { - cachedResp, err = CachedResponse(t.Cache, req) - } else { - // Need to invalidate an existing value - t.Cache.Delete(cacheKey) - } - - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - if cacheable && cachedResp != nil && err == nil { - if t.MarkCachedResponses { - cachedResp.Header.Set(XFromCache, "1") - } - - if varyMatches(cachedResp, req) { - // Can only use cached value if the new request doesn't Vary significantly - freshness := getFreshness(cachedResp.Header, req.Header) - if freshness == fresh { - return cachedResp, nil - } - - if freshness == stale { - var req2 *http.Request - // Add validators if caller hasn't already done so - etag := cachedResp.Header.Get("etag") - if etag != "" && req.Header.Get("etag") == "" { - req2 = cloneRequest(req) - req2.Header.Set("if-none-match", etag) - } - lastModified := cachedResp.Header.Get("last-modified") - if lastModified != "" && req.Header.Get("last-modified") == "" { - if req2 == nil { - req2 = cloneRequest(req) - } - req2.Header.Set("if-modified-since", lastModified) - } - if req2 != nil { - req = req2 - } - } - } - - resp, err = transport.RoundTrip(req) - if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { - // Replace the 304 response with the one from cache, but update with some new headers - endToEndHeaders := getEndToEndHeaders(resp.Header) - for _, header := range endToEndHeaders { - cachedResp.Header[header] = resp.Header[header] - } - resp = cachedResp - } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && - req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { - // In case of transport failure and stale-if-error activated, returns cached content - // when available - return cachedResp, nil - } else { - if err != nil || resp.StatusCode != http.StatusOK { - t.Cache.Delete(cacheKey) - } - if err != nil { - return nil, err - } - } - } else { - reqCacheControl := parseCacheControl(req.Header) - if _, ok := reqCacheControl["only-if-cached"]; ok { - resp = newGatewayTimeoutResponse(req) - } else { - resp, err = transport.RoundTrip(req) - if err != nil { - return nil, err - } - } - } - - if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { - for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { - varyKey = http.CanonicalHeaderKey(varyKey) - fakeHeader := "X-Varied-" + varyKey - reqValue := req.Header.Get(varyKey) - if reqValue != "" { - resp.Header.Set(fakeHeader, reqValue) - } - } - switch req.Method { - case "GET": - // Delay caching until EOF is reached. - resp.Body = &cachingReadCloser{ - R: resp.Body, - OnEOF: func(r io.Reader) { - resp := *resp - resp.Body = ioutil.NopCloser(r) - respBytes, err := httputil.DumpResponse(&resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - }, - } - default: - respBytes, err := httputil.DumpResponse(resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - } - } else { - t.Cache.Delete(cacheKey) - } - return resp, nil -} - -// ErrNoDateHeader indicates that the HTTP headers contained no Date header. -var ErrNoDateHeader = errors.New("no Date header") - -// Date parses and returns the value of the Date header. 
-func Date(respHeaders http.Header) (date time.Time, err error) { - dateHeader := respHeaders.Get("date") - if dateHeader == "" { - err = ErrNoDateHeader - return - } - - return time.Parse(time.RFC1123, dateHeader) -} - -type realClock struct{} - -func (c *realClock) since(d time.Time) time.Duration { - return time.Since(d) -} - -type timer interface { - since(d time.Time) time.Duration -} - -var clock timer = &realClock{} - -// getFreshness will return one of fresh/stale/transparent based on the cache-control -// values of the request and the response -// -// fresh indicates the response can be returned -// stale indicates that the response needs validating before it is returned -// transparent indicates the response should not be used to fulfil the request -// -// Because this is only a private cache, 'public' and 'private' in cache-control aren't -// signficant. Similarly, smax-age isn't used. -func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - if _, ok := reqCacheControl["no-cache"]; ok { - return transparent - } - if _, ok := respCacheControl["no-cache"]; ok { - return stale - } - if _, ok := reqCacheControl["only-if-cached"]; ok { - return fresh - } - - date, err := Date(respHeaders) - if err != nil { - return stale - } - currentAge := clock.since(date) - - var lifetime time.Duration - var zeroDuration time.Duration - - // If a response includes both an Expires header and a max-age directive, - // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. - if maxAge, ok := respCacheControl["max-age"]; ok { - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } else { - expiresHeader := respHeaders.Get("Expires") - if expiresHeader != "" { - expires, err := time.Parse(time.RFC1123, expiresHeader) - if err != nil { - lifetime = zeroDuration - } else { - lifetime = expires.Sub(date) - } - } - } - - if maxAge, ok := reqCacheControl["max-age"]; ok { - // the client is willing to accept a response whose age is no greater than the specified time in seconds - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } - if minfresh, ok := reqCacheControl["min-fresh"]; ok { - // the client wants a response that will still be fresh for at least the specified number of seconds. - minfreshDuration, err := time.ParseDuration(minfresh + "s") - if err == nil { - currentAge = time.Duration(currentAge + minfreshDuration) - } - } - - if maxstale, ok := reqCacheControl["max-stale"]; ok { - // Indicates that the client is willing to accept a response that has exceeded its expiration time. - // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded - // its expiration time by no more than the specified number of seconds. - // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. - // - // Responses served only because of a max-stale value are supposed to have a Warning header added to them, - // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different - // return-value available here. 
- if maxstale == "" { - return fresh - } - maxstaleDuration, err := time.ParseDuration(maxstale + "s") - if err == nil { - currentAge = time.Duration(currentAge - maxstaleDuration) - } - } - - if lifetime > currentAge { - return fresh - } - - return stale -} - -// Returns true if either the request or the response includes the stale-if-error -// cache control extension: https://tools.ietf.org/html/rfc5861 -func canStaleOnError(respHeaders, reqHeaders http.Header) bool { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - - var err error - lifetime := time.Duration(-1) - - if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - - if lifetime >= 0 { - date, err := Date(respHeaders) - if err != nil { - return false - } - currentAge := clock.since(date) - if lifetime > currentAge { - return true - } - } - - return false -} - -func getEndToEndHeaders(respHeaders http.Header) []string { - // These headers are always hop-by-hop - hopByHopHeaders := map[string]struct{}{ - "Connection": {}, - "Keep-Alive": {}, - "Proxy-Authenticate": {}, - "Proxy-Authorization": {}, - "Te": {}, - "Trailers": {}, - "Transfer-Encoding": {}, - "Upgrade": {}, - } - - for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { - // any header listed in connection, if present, is also considered hop-by-hop - if strings.Trim(extra, " ") != "" { - hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} - } - } - endToEndHeaders := []string{} - for respHeader := range respHeaders { - if _, ok := hopByHopHeaders[respHeader]; !ok { - endToEndHeaders = append(endToEndHeaders, respHeader) - } - } - return endToEndHeaders -} - -func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { - if _, ok := respCacheControl["no-store"]; ok { - return false - } - if _, ok := reqCacheControl["no-store"]; ok { - return false - } - return true -} - -func newGatewayTimeoutResponse(req *http.Request) *http.Response { - var braw bytes.Buffer - braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") - resp, err := http.ReadResponse(bufio.NewReader(&braw), req) - if err != nil { - panic(err) - } - return resp -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
-// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -type cacheControl map[string]string - -func parseCacheControl(headers http.Header) cacheControl { - cc := cacheControl{} - ccHeader := headers.Get("Cache-Control") - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - keyval := strings.Split(part, "=") - cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") - } else { - cc[part] = "" - } - } - return cc -} - -// headerAllCommaSepValues returns all comma-separated values (each -// with whitespace trimmed) for header name in headers. According to -// Section 4.2 of the HTTP/1.1 spec -// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), -// values from multiple occurrences of a header should be concatenated, if -// the header's value is a comma-separated list. -func headerAllCommaSepValues(headers http.Header, name string) []string { - var vals []string - for _, val := range headers[http.CanonicalHeaderKey(name)] { - fields := strings.Split(val, ",") - for i, f := range fields { - fields[i] = strings.TrimSpace(f) - } - vals = append(vals, fields...) - } - return vals -} - -// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF -// handler with a full copy of the content read from R when EOF is -// reached. -type cachingReadCloser struct { - // Underlying ReadCloser. - R io.ReadCloser - // OnEOF is called with a copy of the content of R when EOF is reached. - OnEOF func(io.Reader) - - buf bytes.Buffer // buf stores a copy of the content of R. -} - -// Read reads the next len(p) bytes from R or until R is drained. The -// return value n is the number of bytes read. If R has no data to -// return, err is io.EOF and OnEOF is called with a full copy of what -// has been read so far. 
-func (r *cachingReadCloser) Read(p []byte) (n int, err error) { - n, err = r.R.Read(p) - r.buf.Write(p[:n]) - if err == io.EOF { - r.OnEOF(bytes.NewReader(r.buf.Bytes())) - } - return n, err -} - -func (r *cachingReadCloser) Close() error { - return r.R.Close() -} - -// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation -func NewMemoryCacheTransport() *Transport { - c := NewMemoryCache() - t := NewTransport(c) - return t -} diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go index 95ae54fb..29b31cf7 100644 --- a/vendor/github.com/json-iterator/go/iter.go +++ b/vendor/github.com/json-iterator/go/iter.go @@ -74,6 +74,7 @@ type Iterator struct { buf []byte head int tail int + depth int captureStartedAt int captured []byte Error error @@ -88,6 +89,7 @@ func NewIterator(cfg API) *Iterator { buf: nil, head: 0, tail: 0, + depth: 0, } } @@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { buf: make([]byte, bufSize), head: 0, tail: 0, + depth: 0, } } @@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator { buf: input, head: 0, tail: len(input), + depth: 0, } } @@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator { iter.reader = reader iter.head = 0 iter.tail = 0 + iter.depth = 0 return iter } @@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator { iter.buf = input iter.head = 0 iter.tail = len(input) + iter.depth = 0 return iter } @@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} { return nil } } + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go index 6188cb45..204fe0e0 100644 --- a/vendor/github.com/json-iterator/go/iter_array.go +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) { func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { c := iter.nextToken() if c == '[' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c != ']' { iter.unreadByte() if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() for c == ',' { if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != ']' { iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } - return true + return iter.decrementDepth() } if c == 'n' { iter.skipThreeBytes('u', 'l', 'l') diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index 1c575767..b6513711 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() var field string if c == '{' { + if 
!iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() @@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { @@ -159,15 +166,20 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() field := iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { field = iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go index 8fcdc3b6..9303de41 100644 --- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() { func (iter *Iterator) skipArray() { level := 1 + if !iter.incrementDepth() { + return + } for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() { i = iter.head - 1 // it will be i++ soon case '[': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case ']': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { @@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() { func (iter *Iterator) skipObject() { level := 1 + if !iter.incrementDepth() { + return + } + for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() { i = iter.head - 1 // it will be i++ soon case '{': // If open symbol, increase level 
level++ + if !iter.incrementDepth() { + return + } case '}': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go index 4459e203..74974ba7 100644 --- a/vendor/github.com/json-iterator/go/reflect.go +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx { // ReadVal copy the underlying JSON into go interface, same as json.Unmarshal func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth cacheKey := reflect2.RTypeOf(obj) decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { @@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) { return } decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } } // WriteVal copy the go interface into underlying JSON, same as json.Marshal diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 05e8fbf1..e27e8d19 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { if ctx.onlyTaggedField && !hastag && !field.Anonymous() { continue } - tagParts := strings.Split(tag, ",") if tag == "-" { continue } + tagParts := strings.Split(tag, ",") if field.Anonymous() && (tag == "" || tagParts[0] == "") { if field.Type().Kind() == reflect.Struct { structDescriptor := describeStruct(ctx, field.Type()) diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 547b4421..08e9a391 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -249,6 +249,10 @@ type mapEncoder struct { } func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } stream.WriteObjectStart() iter := encoder.mapType.UnsafeIterate(ptr) for i := 0; iter.HasNext(); i++ { diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go index fea50719..3e21f375 100644 --- a/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -3,8 +3,9 @@ package jsoniter import ( "encoding" "encoding/json" - "github.com/modern-go/reflect2" "unsafe" + + "github.com/modern-go/reflect2" ) var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() @@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteNil() return } - bytes, err := json.Marshal(obj) + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() if err != nil { stream.Error = err } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } stream.Write(bytes) } } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 932641ac..5ad5cc56 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ 
b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } var c byte for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) @@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if c != '}' { iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) } + iter.decrementDepth() } func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { @@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { if iter.readFieldHash() == decoder.fieldHash { decoder.fieldDecoder.Decode(ptr, iter) @@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type twoFieldsStructDecoder struct { @@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type threeFieldsStructDecoder struct { @@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fourFieldsStructDecoder struct { @@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fiveFieldsStructDecoder struct { @@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sixFieldsStructDecoder struct { @@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -774,6 +801,7 @@ 
func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sevenFieldsStructDecoder struct { @@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type eightFieldsStructDecoder struct { @@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type nineFieldsStructDecoder struct { @@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type tenFieldsStructDecoder struct { @@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type structFieldDecoder struct { diff --git a/vendor/github.com/kubernetes-incubator/external-storage/lib/util/util.go b/vendor/github.com/kubernetes-incubator/external-storage/lib/util/util.go deleted file mode 100644 index fee69b55..00000000 --- a/vendor/github.com/kubernetes-incubator/external-storage/lib/util/util.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import "k8s.io/api/core/v1" - -// Common allocation units -const ( - KiB int64 = 1024 - MiB int64 = 1024 * KiB - GiB int64 = 1024 * MiB - TiB int64 = 1024 * GiB -) - -// RoundUpSize calculates how many allocation units are needed to accommodate -// a volume of given size. E.g. 
when user wants 1500MiB volume, while AWS EBS -// allocates volumes in gibibyte-sized chunks, -// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2' -// (2 GiB is the smallest allocatable volume that can hold 1500MiB) -func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { - return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes -} - -// RoundUpToGiB rounds up given quantity upto chunks of GiB -func RoundUpToGiB(sizeBytes int64) int64 { - return RoundUpSize(sizeBytes, GiB) -} - -// AccessModesContains returns whether the requested mode is contained by modes -func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// AccessModesContainedInAll returns whether all of the requested modes are contained by modes -func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool { - for _, mode := range requestedModes { - if !AccessModesContains(indexedModes, mode) { - return false - } - } - return true -} - -// CheckPersistentVolumeClaimModeBlock checks VolumeMode. -// If the mode is Block, return true otherwise return false. -func CheckPersistentVolumeClaimModeBlock(pvc *v1.PersistentVolumeClaim) bool { - return pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock -} diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml new file mode 100644 index 00000000..f91e5c1f --- /dev/null +++ b/vendor/github.com/miekg/dns/.codecov.yml @@ -0,0 +1,8 @@ +coverage: + status: + project: + default: + target: 40% + threshold: null + patch: false + changes: false diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore new file mode 100644 index 00000000..776cd950 --- /dev/null +++ b/vendor/github.com/miekg/dns/.gitignore @@ -0,0 +1,4 @@ +*.6 +tags +test.out +a.out diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml new file mode 100644 index 00000000..8eaa0642 --- /dev/null +++ b/vendor/github.com/miekg/dns/.travis.yml @@ -0,0 +1,17 @@ +language: go +sudo: false + +go: + - "1.12.x" + - "1.13.x" + - tip + +env: + - GO111MODULE=on + +script: + - go generate ./... && test `git ls-files --modified | wc -l` = 0 + - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./... + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS new file mode 100644 index 00000000..19656835 --- /dev/null +++ b/vendor/github.com/miekg/dns/AUTHORS @@ -0,0 +1 @@ +Miek Gieben diff --git a/vendor/github.com/miekg/dns/CODEOWNERS b/vendor/github.com/miekg/dns/CODEOWNERS new file mode 100644 index 00000000..e0917031 --- /dev/null +++ b/vendor/github.com/miekg/dns/CODEOWNERS @@ -0,0 +1 @@ +* @miekg @tmthrgd diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS new file mode 100644 index 00000000..5903779d --- /dev/null +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS @@ -0,0 +1,10 @@ +Alex A. 
Skinner +Andrew Tunnell-Jones +Ask Bjørn Hansen +Dave Cheney +Dusty Wilson +Marek Majkowski +Peter van Dijk +Omri Bahumi +Alex Sergeyev +James Hartig diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT new file mode 100644 index 00000000..35702b10 --- /dev/null +++ b/vendor/github.com/miekg/dns/COPYRIGHT @@ -0,0 +1,9 @@ +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben + +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE new file mode 100644 index 00000000..55f12ab7 --- /dev/null +++ b/vendor/github.com/miekg/dns/LICENSE @@ -0,0 +1,30 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +As this is fork of the official Go code the same license applies. +Extensions of the original work are copyright (c) 2011 Miek Gieben diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz new file mode 100644 index 00000000..dc158c4a --- /dev/null +++ b/vendor/github.com/miekg/dns/Makefile.fuzz @@ -0,0 +1,33 @@ +# Makefile for fuzzing +# +# Use go-fuzz and needs the tools installed. 
+# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/ +# +# Installing go-fuzz: +# $ make -f Makefile.fuzz get +# Installs: +# * github.com/dvyukov/go-fuzz/go-fuzz +# * get github.com/dvyukov/go-fuzz/go-fuzz-build + +all: build + +.PHONY: build +build: + go-fuzz-build -tags fuzz github.com/miekg/dns + +.PHONY: build-newrr +build-newrr: + go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns + +.PHONY: fuzz +fuzz: + go-fuzz -bin=dns-fuzz.zip -workdir=fuzz + +.PHONY: get +get: + go get github.com/dvyukov/go-fuzz/go-fuzz + go get github.com/dvyukov/go-fuzz/go-fuzz-build + +.PHONY: clean +clean: + rm *-fuzz.zip diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release new file mode 100644 index 00000000..8fb748e8 --- /dev/null +++ b/vendor/github.com/miekg/dns/Makefile.release @@ -0,0 +1,52 @@ +# Makefile for releasing. +# +# The release is controlled from version.go. The version found there is +# used to tag the git repo, we're not building any artifects so there is nothing +# to upload to github. +# +# * Up the version in version.go +# * Run: make -f Makefile.release release +# * will *commit* your change with 'Release $VERSION' +# * push to github +# + +define GO +//+build ignore + +package main + +import ( + "fmt" + + "github.com/miekg/dns" +) + +func main() { + fmt.Println(dns.Version.String()) +} +endef + +$(file > version_release.go,$(GO)) +VERSION:=$(shell go run version_release.go) +TAG="v$(VERSION)" + +all: + @echo Use the \'release\' target to start a release $(VERSION) + rm -f version_release.go + +.PHONY: release +release: commit push + @echo Released $(VERSION) + rm -f version_release.go + +.PHONY: commit +commit: + @echo Committing release $(VERSION) + git commit -am"Release $(VERSION)" + git tag $(TAG) + +.PHONY: push +push: + @echo Pushing release $(VERSION) to master + git push --tags + git push diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md new file mode 100644 index 00000000..126fe62c --- /dev/null +++ b/vendor/github.com/miekg/dns/README.md @@ -0,0 +1,175 @@ +[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) +[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns) +[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) + +# Alternative (more granular) approach to a DNS library + +> Less is more. + +Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types. +It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there +isn't a convenience function for it. Server side and client side programming is supported, i.e. you +can build servers and resolvers with it. + +We try to keep the "master" branch as sane as possible and at the bleeding edge of standards, +avoiding breaking changes wherever reasonable. We support the last two versions of Go. + +# Goals + +* KISS; +* Fast; +* Small API. If it's easy to code in Go, don't make a function for it. 
+ +# Users + +A not-so-up-to-date-list-that-may-be-actually-current: + +* https://github.com/coredns/coredns +* https://cloudflare.com +* https://github.com/abh/geodns +* http://www.statdns.com/ +* http://www.dnsinspect.com/ +* https://github.com/chuangbo/jianbing-dictionary-dns +* http://www.dns-lg.com/ +* https://github.com/fcambus/rrda +* https://github.com/kenshinx/godns +* https://github.com/skynetservices/skydns +* https://github.com/hashicorp/consul +* https://github.com/DevelopersPL/godnsagent +* https://github.com/duedil-ltd/discodns +* https://github.com/StalkR/dns-reverse-proxy +* https://github.com/tianon/rawdns +* https://mesosphere.github.io/mesos-dns/ +* https://pulse.turbobytes.com/ +* https://github.com/fcambus/statzone +* https://github.com/benschw/dns-clb-go +* https://github.com/corny/dnscheck for +* https://namesmith.io +* https://github.com/miekg/unbound +* https://github.com/miekg/exdns +* https://dnslookup.org +* https://github.com/looterz/grimd +* https://github.com/phamhongviet/serf-dns +* https://github.com/mehrdadrad/mylg +* https://github.com/bamarni/dockness +* https://github.com/fffaraz/microdns +* http://kelda.io +* https://github.com/ipdcode/hades +* https://github.com/StackExchange/dnscontrol/ +* https://www.dnsperf.com/ +* https://dnssectest.net/ +* https://dns.apebits.com +* https://github.com/oif/apex +* https://github.com/jedisct1/dnscrypt-proxy +* https://github.com/jedisct1/rpdns +* https://github.com/xor-gate/sshfp +* https://github.com/rs/dnstrace +* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss)) +* https://github.com/semihalev/sdns +* https://render.com +* https://github.com/peterzen/goresolver +* https://github.com/folbricht/routedns + +Send pull request if you want to be listed here. + +# Features + +* UDP/TCP queries, IPv4 and IPv6 +* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported +* Fast +* Server side programming (mimicking the net/http package) +* Client side programming +* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519 +* EDNS0, NSID, Cookies +* AXFR/IXFR +* TSIG, SIG(0) +* DNS over TLS (DoT): encrypted connection between client and server over TCP +* DNS name compression + +Have fun! + +Miek Gieben - 2010-2012 - +DNS Authors 2012- + +# Building + +This library uses Go modules and uses semantic versioning. Building is done with the `go` tool, so +the following should work: + + go get github.com/miekg/dns + go build github.com/miekg/dns + +## Examples + +A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc +github.com/miekg/dns`). + +Example programs can be found in the `github.com/miekg/exdns` repository. 
+ +## Supported RFCs + +*all of them* + +* 103{4,5} - DNS standard +* 1348 - NSAP record (removed the record) +* 1982 - Serial Arithmetic +* 1876 - LOC record +* 1995 - IXFR +* 1996 - DNS notify +* 2136 - DNS Update (dynamic updates) +* 2181 - RRset definition - there is no RRset type though, just []RR +* 2537 - RSAMD5 DNS keys +* 2065 - DNSSEC (updated in later RFCs) +* 2671 - EDNS record +* 2782 - SRV record +* 2845 - TSIG record +* 2915 - NAPTR record +* 2929 - DNS IANA Considerations +* 3110 - RSASHA1 DNS keys +* 3123 - APL record +* 3225 - DO bit (DNSSEC OK) +* 340{1,2,3} - NAPTR record +* 3445 - Limiting the scope of (DNS)KEY +* 3597 - Unknown RRs +* 403{3,4,5} - DNSSEC + validation functions +* 4255 - SSHFP record +* 4343 - Case insensitivity +* 4408 - SPF record +* 4509 - SHA256 Hash in DS +* 4592 - Wildcards in the DNS +* 4635 - HMAC SHA TSIG +* 4701 - DHCID +* 4892 - id.server +* 5001 - NSID +* 5155 - NSEC3 record +* 5205 - HIP record +* 5702 - SHA2 in the DNS +* 5936 - AXFR +* 5966 - TCP implementation recommendations +* 6605 - ECDSA +* 6725 - IANA Registry Update +* 6742 - ILNP DNS +* 6840 - Clarifications and Implementation Notes for DNS Security +* 6844 - CAA record +* 6891 - EDNS0 update +* 6895 - DNS IANA considerations +* 6944 - DNSSEC DNSKEY Algorithm Status +* 6975 - Algorithm Understanding in DNSSEC +* 7043 - EUI48/EUI64 records +* 7314 - DNS (EDNS) EXPIRE Option +* 7477 - CSYNC RR +* 7828 - edns-tcp-keepalive EDNS0 Option +* 7553 - URI record +* 7858 - DNS over TLS: Initiation and Performance Considerations +* 7871 - EDNS0 Client Subnet +* 7873 - Domain Name System (DNS) Cookies +* 8080 - EdDSA for DNSSEC +* 8499 - DNS Terminology + +## Loosely Based Upon + +* ldns - +* NSD - +* Net::DNS - +* GRONG - diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go new file mode 100644 index 00000000..825617fe --- /dev/null +++ b/vendor/github.com/miekg/dns/acceptfunc.go @@ -0,0 +1,61 @@ +package dns + +// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError. +// It returns a MsgAcceptAction to indicate what should happen with the message. +type MsgAcceptFunc func(dh Header) MsgAcceptAction + +// DefaultMsgAcceptFunc checks the request and will reject if: +// +// * isn't a request (don't respond in that case) +// +// * opcode isn't OpcodeQuery or OpcodeNotify +// +// * Zero bit isn't zero +// +// * has more than 1 question in the question section +// +// * has more than 1 RR in the Answer section +// +// * has more than 0 RRs in the Authority section +// +// * has more than 2 RRs in the Additional section +// +var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc + +// MsgAcceptAction represents the action to be taken. +type MsgAcceptAction int + +const ( + MsgAccept MsgAcceptAction = iota // Accept the message + MsgReject // Reject the message with a RcodeFormatError + MsgIgnore // Ignore the error and send nothing back. + MsgRejectNotImplemented // Reject the message with a RcodeNotImplemented +) + +func defaultMsgAcceptFunc(dh Header) MsgAcceptAction { + if isResponse := dh.Bits&_QR != 0; isResponse { + return MsgIgnore + } + + // Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs. + opcode := int(dh.Bits>>11) & 0xF + if opcode != OpcodeQuery && opcode != OpcodeNotify { + return MsgRejectNotImplemented + } + + if dh.Qdcount != 1 { + return MsgReject + } + // NOTIFY requests can have a SOA in the ANSWER section. 
See RFC 1996 Section 3.7 and 3.11. + if dh.Ancount > 1 { + return MsgReject + } + // IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3. + if dh.Nscount > 1 { + return MsgReject + } + if dh.Arcount > 2 { + return MsgReject + } + return MsgAccept +} diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go new file mode 100644 index 00000000..db2761d4 --- /dev/null +++ b/vendor/github.com/miekg/dns/client.go @@ -0,0 +1,415 @@ +package dns + +// A client implementation. + +import ( + "context" + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "net" + "strings" + "time" +) + +const ( + dnsTimeout time.Duration = 2 * time.Second + tcpIdleTimeout time.Duration = 8 * time.Second +) + +// A Conn represents a connection to a DNS server. +type Conn struct { + net.Conn // a net.Conn holding the connection + UDPSize uint16 // minimum receive buffer for UDP messages + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + tsigRequestMAC string +} + +// A Client defines parameters for a DNS client. +type Client struct { + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more + // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, + // WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and + // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext) + Timeout time.Duration + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass + group singleflight +} + +// Exchange performs a synchronous UDP query. It sends the message m to the address +// contained in a and waits for a reply. Exchange does not retry a failed query, nor +// will it fall back to TCP in case of truncation. +// See client.Exchange for more information on setting larger buffer sizes. +func Exchange(m *Msg, a string) (r *Msg, err error) { + client := Client{Net: "udp"} + r, _, err = client.Exchange(m, a) + return r, err +} + +func (c *Client) dialTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + if c.DialTimeout != 0 { + return c.DialTimeout + } + return dnsTimeout +} + +func (c *Client) readTimeout() time.Duration { + if c.ReadTimeout != 0 { + return c.ReadTimeout + } + return dnsTimeout +} + +func (c *Client) writeTimeout() time.Duration { + if c.WriteTimeout != 0 { + return c.WriteTimeout + } + return dnsTimeout +} + +// Dial connects to the address on the named network. 
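The Client type above exposes per-phase timeouts plus a cumulative Timeout, and an optional *net.Dialer for pinning a local address. A sketch of a TCP client with a single two-second budget (the server address is a placeholder):

```go
package main

import (
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := &dns.Client{
		Net:     "tcp",           // "" or "udp" is the default; "tcp-tls" gives DNS over TLS
		Timeout: 2 * time.Second, // one cumulative budget for dial, write and read
	}

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeMX)

	r, rtt, err := c.Exchange(m, "192.0.2.53:53") // placeholder server address
	if err != nil {
		log.Fatal(err)
	}
	log.Println(rtt, r.Rcode)
}
```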
+func (c *Client) Dial(address string) (conn *Conn, err error) { + // create a new dialer with the appropriate timeout + var d net.Dialer + if c.Dialer == nil { + d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())} + } else { + d = *c.Dialer + } + + network := c.Net + if network == "" { + network = "udp" + } + + useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls") + + conn = new(Conn) + if useTLS { + network = strings.TrimSuffix(network, "-tls") + + conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) + } else { + conn.Conn, err = d.Dial(network, address) + } + if err != nil { + return nil, err + } + + return conn, nil +} + +// Exchange performs a synchronous query. It sends the message m to the address +// contained in a and waits for a reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. Messages without an OPT RR will fallback to the historic limit +// of 512 bytes +// To specify a local address or a timeout, the caller has to set the `Client.Dialer` +// attribute appropriately +func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight { + return c.exchange(m, address) + } + + q := m.Question[0] + key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) + r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { + return c.exchange(m, address) + }) + if r != nil && shared { + r = r.Copy() + } + + return r, rtt, err +} + +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn + + co, err = c.Dial(a) + + if err != nil { + return nil, 0, err + } + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + // Otherwise use the client's configured UDP size. + if opt == nil && c.UDPSize >= MinMsgSize { + co.UDPSize = c.UDPSize + } + + co.TsigSecret = c.TsigSecret + t := time.Now() + // write with the appropriate write timeout + co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout()))) + if err = co.WriteMsg(m); err != nil { + return nil, 0, err + } + + co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout()))) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + rtt = time.Since(t) + return r, rtt, err +} + +// ReadMsg reads a message from the connection co. +// If the received message contains a TSIG record the transaction signature +// is verified. This method always tries to return the message, however if an +// error is returned there are no guarantees that the returned message is a +// valid representation of the packet read. 
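As the Exchange documentation above notes, replies to messages without an OPT record fall back to the historic 512-byte limit. A sketch that advertises a larger EDNS0 buffer with SetEdns0 and retries over TCP on truncation (addresses are placeholders):

```go
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeDNSKEY)
	m.SetEdns0(4096, true) // advertise a 4096-byte UDP buffer and set the DNSSEC OK bit

	c := new(dns.Client)
	r, _, err := c.Exchange(m, "198.51.100.1:53") // placeholder resolver address
	if err != nil {
		log.Fatal(err)
	}
	if r.Truncated { // still too big for UDP: retry the same question over TCP
		c.Net = "tcp"
		if r, _, err = c.Exchange(m, "198.51.100.1:53"); err != nil {
			log.Fatal(err)
		}
	}
	log.Println(len(r.Answer), "answer records")
}
```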
+func (co *Conn) ReadMsg() (*Msg, error) { + p, err := co.ReadMsgHeader(nil) + if err != nil { + return nil, err + } + + m := new(Msg) + if err := m.Unpack(p); err != nil { + // If an error was returned, we still want to allow the user to use + // the message, but naively they can just check err if they don't want + // to use an erroneous message + return m, err + } + if t := m.IsTsig(); t != nil { + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + } + return m, err +} + +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). +// Returns message as a byte slice to be parsed with Msg.Unpack later on. +// Note that error handling on the message body is not possible as only the header is parsed. +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { + var ( + p []byte + n int + err error + ) + + if _, ok := co.Conn.(net.PacketConn); ok { + if co.UDPSize > MinMsgSize { + p = make([]byte, co.UDPSize) + } else { + p = make([]byte, MinMsgSize) + } + n, err = co.Read(p) + } else { + var length uint16 + if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil { + return nil, err + } + + p = make([]byte, length) + n, err = io.ReadFull(co.Conn, p) + } + + if err != nil { + return nil, err + } else if n < headerSize { + return nil, ErrShortRead + } + + p = p[:n] + if hdr != nil { + dh, _, err := unpackMsgHdr(p, 0) + if err != nil { + return nil, err + } + *hdr = dh + } + return p, err +} + +// Read implements the net.Conn read method. +func (co *Conn) Read(p []byte) (n int, err error) { + if co.Conn == nil { + return 0, ErrConnEmpty + } + + if _, ok := co.Conn.(net.PacketConn); ok { + // UDP connection + return co.Conn.Read(p) + } + + var length uint16 + if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil { + return 0, err + } + if int(length) > len(p) { + return 0, io.ErrShortBuffer + } + + return io.ReadFull(co.Conn, p[:length]) +} + +// WriteMsg sends a message through the connection co. +// If the message m contains a TSIG record the transaction +// signature is calculated. +func (co *Conn) WriteMsg(m *Msg) (err error) { + var out []byte + if t := m.IsTsig(); t != nil { + mac := "" + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return ErrSecret + } + out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + // Set for the next read, although only used in zone transfers + co.tsigRequestMAC = mac + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + _, err = co.Write(out) + return err +} + +// Write implements the net.Conn Write method. 
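WriteMsg and ReadMsg operate on a Conn, so one connection can be reused for several queries instead of calling Exchange per query. A sketch (the server address is a placeholder):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := &dns.Client{Net: "tcp"}
	co, err := c.Dial("192.0.2.53:53") // placeholder server address
	if err != nil {
		log.Fatal(err)
	}
	defer co.Close()

	for _, name := range []string{"example.org.", "example.com."} {
		m := new(dns.Msg)
		m.SetQuestion(name, dns.TypeA)

		co.SetWriteDeadline(time.Now().Add(2 * time.Second))
		if err := co.WriteMsg(m); err != nil {
			log.Fatal(err)
		}
		co.SetReadDeadline(time.Now().Add(2 * time.Second))
		r, err := co.ReadMsg()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(name, r.Rcode, len(r.Answer))
	}
}
```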
+func (co *Conn) Write(p []byte) (int, error) { + if len(p) > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + + if _, ok := co.Conn.(net.PacketConn); ok { + return co.Conn.Write(p) + } + + l := make([]byte, 2) + binary.BigEndian.PutUint16(l, uint16(len(p))) + + n, err := (&net.Buffers{l, p}).WriteTo(co.Conn) + return int(n), err +} + +// Return the appropriate timeout for a specific request +func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration { + var requestTimeout time.Duration + if c.Timeout != 0 { + requestTimeout = c.Timeout + } else { + requestTimeout = timeout + } + // net.Dialer.Timeout has priority if smaller than the timeouts computed so + // far + if c.Dialer != nil && c.Dialer.Timeout != 0 { + if c.Dialer.Timeout < requestTimeout { + requestTimeout = c.Dialer.Timeout + } + } + return requestTimeout +} + +// Dial connects to the address on the named network. +func Dial(network, address string) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.Dial(network, address) + if err != nil { + return nil, err + } + return conn, nil +} + +// ExchangeContext performs a synchronous UDP query, like Exchange. It +// additionally obeys deadlines from the passed Context. +func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) { + client := Client{Net: "udp"} + r, _, err = client.ExchangeContext(ctx, m, a) + // ignorint rtt to leave the original ExchangeContext API unchanged, but + // this function will go away + return r, err +} + +// ExchangeConn performs a synchronous query. It sends the message m via the connection +// c and waits for a reply. The connection c is not closed by ExchangeConn. +// Deprecated: This function is going away, but can easily be mimicked: +// +// co := &dns.Conn{Conn: c} // c is your net.Conn +// co.WriteMsg(m) +// in, _ := co.ReadMsg() +// co.Close() +// +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { + println("dns: ExchangeConn: this function is deprecated") + co := new(Conn) + co.Conn = c + if err = co.WriteMsg(m); err != nil { + return nil, err + } + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// DialTimeout acts like Dial but takes a timeout. +func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { + client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}} + return client.Dial(address) +} + +// DialWithTLS connects to the address on the named network with TLS. +func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { + if !strings.HasSuffix(network, "-tls") { + network += "-tls" + } + client := Client{Net: network, TLSConfig: tlsConfig} + return client.Dial(address) +} + +// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. +func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { + if !strings.HasSuffix(network, "-tls") { + network += "-tls" + } + client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig} + return client.Dial(address) +} + +// ExchangeContext acts like Exchange, but honors the deadline on the provided +// context, if present. If there is both a context deadline and a configured +// timeout on the client, the earliest of the two takes effect. 
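A sketch of the ExchangeContext call described above, bounding a single query with a context deadline (the resolver address is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	m := new(dns.Msg)
	m.SetQuestion("example.net.", dns.TypeAAAA)

	c := new(dns.Client)
	r, rtt, err := c.ExchangeContext(ctx, m, "8.8.8.8:53") // placeholder resolver
	if err != nil {
		log.Fatal(err) // a missed deadline surfaces as an i/o timeout error
	}
	fmt.Println(rtt, r.Rcode)
}
```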
+func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var timeout time.Duration + if deadline, ok := ctx.Deadline(); !ok { + timeout = 0 + } else { + timeout = time.Until(deadline) + } + // not passing the context to the underlying calls, as the API does not support + // context. For timeouts you should set up Client.Dialer and call Client.Exchange. + // TODO(tmthrgd,miekg): this is a race condition. + c.Dialer = &net.Dialer{Timeout: timeout} + return c.Exchange(m, a) +} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go new file mode 100644 index 00000000..e11b630d --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -0,0 +1,135 @@ +package dns + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" +) + +// ClientConfig wraps the contents of the /etc/resolv.conf file. +type ClientConfig struct { + Servers []string // servers to use + Search []string // suffixes to append to local name + Port string // what port to use + Ndots int // number of dots in name to trigger absolute lookup + Timeout int // seconds before giving up on packet + Attempts int // lost packets before giving up on server, not used in the package dns +} + +// ClientConfigFromFile parses a resolv.conf(5) like file and returns +// a *ClientConfig. +func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { + file, err := os.Open(resolvconf) + if err != nil { + return nil, err + } + defer file.Close() + return ClientConfigFromReader(file) +} + +// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument +func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { + c := new(ClientConfig) + scanner := bufio.NewScanner(resolvconf) + c.Servers = make([]string, 0) + c.Search = make([]string, 0) + c.Port = "53" + c.Ndots = 1 + c.Timeout = 5 + c.Attempts = 2 + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() + f := strings.Fields(line) + if len(f) < 1 { + continue + } + switch f[0] { + case "nameserver": // add one name server + if len(f) > 1 { + // One more check: make sure server name is + // just an IP address. Otherwise we need DNS + // to look it up. + name := f[1] + c.Servers = append(c.Servers, name) + } + + case "domain": // set search path to just this domain + if len(f) > 1 { + c.Search = make([]string, 1) + c.Search[0] = f[1] + } else { + c.Search = make([]string, 0) + } + + case "search": // set search path to given servers + c.Search = append([]string(nil), f[1:]...) + + case "options": // magic options + for _, s := range f[1:] { + switch { + case len(s) >= 6 && s[:6] == "ndots:": + n, _ := strconv.Atoi(s[6:]) + if n < 0 { + n = 0 + } else if n > 15 { + n = 15 + } + c.Ndots = n + case len(s) >= 8 && s[:8] == "timeout:": + n, _ := strconv.Atoi(s[8:]) + if n < 1 { + n = 1 + } + c.Timeout = n + case len(s) >= 9 && s[:9] == "attempts:": + n, _ := strconv.Atoi(s[9:]) + if n < 1 { + n = 1 + } + c.Attempts = n + case s == "rotate": + /* not imp */ + } + } + } + } + return c, nil +} + +// NameList returns all of the names that should be queried based on the +// config. It is based off of go's net/dns name building, but it does not +// check the length of the resulting names. +func (c *ClientConfig) NameList(name string) []string { + // if this domain is already fully qualified, no append needed. 
+ if IsFqdn(name) { + return []string{name} + } + + // Check to see if the name has more labels than Ndots. Do this before making + // the domain fully qualified. + hasNdots := CountLabel(name) > c.Ndots + // Make the domain fully qualified. + name = Fqdn(name) + + // Make a list of names based off search. + names := []string{} + + // If name has enough dots, try that first. + if hasNdots { + names = append(names, name) + } + for _, s := range c.Search { + names = append(names, Fqdn(name+s)) + } + // If we didn't have enough dots, try after suffixes. + if !hasNdots { + names = append(names, name) + } + return names +} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go new file mode 100644 index 00000000..8c4a14ef --- /dev/null +++ b/vendor/github.com/miekg/dns/dane.go @@ -0,0 +1,43 @@ +package dns + +import ( + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/hex" + "errors" +) + +// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad MatchingType or Selector") +} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go new file mode 100644 index 00000000..b059f6fc --- /dev/null +++ b/vendor/github.com/miekg/dns/defaults.go @@ -0,0 +1,378 @@ +package dns + +import ( + "errors" + "net" + "strconv" + "strings" +) + +const hexDigit = "0123456789abcdef" + +// Everything is assumed in ClassINET. + +// SetReply creates a reply message from a request message. +func (dns *Msg) SetReply(request *Msg) *Msg { + dns.Id = request.Id + dns.Response = true + dns.Opcode = request.Opcode + if dns.Opcode == OpcodeQuery { + dns.RecursionDesired = request.RecursionDesired // Copy rd bit + dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit + } + dns.Rcode = RcodeSuccess + if len(request.Question) > 0 { + dns.Question = make([]Question, 1) + dns.Question[0] = request.Question[0] + } + return dns +} + +// SetQuestion creates a question message, it sets the Question +// section, generates an Id and sets the RecursionDesired (RD) +// bit to true. +func (dns *Msg) SetQuestion(z string, t uint16) *Msg { + dns.Id = Id() + dns.RecursionDesired = true + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, t, ClassINET} + return dns +} + +// SetNotify creates a notify message, it sets the Question +// section, generates an Id and sets the Authoritative (AA) +// bit to true. +func (dns *Msg) SetNotify(z string) *Msg { + dns.Opcode = OpcodeNotify + dns.Authoritative = true + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetRcode creates an error message suitable for the request. 
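ClientConfigFromFile and NameList, shown earlier in clientconfig.go, mirror the stub-resolver behaviour configured in resolv.conf. A sketch that walks the search list against the first configured nameserver (assumes a Unix-style /etc/resolv.conf):

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/miekg/dns"
)

func main() {
	conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		log.Fatal(err)
	}
	if len(conf.Servers) == 0 {
		log.Fatal("no nameservers configured")
	}

	c := new(dns.Client)
	for _, name := range conf.NameList("www") { // applies the search list and ndots rules
		m := new(dns.Msg)
		m.SetQuestion(name, dns.TypeA)
		r, _, err := c.Exchange(m, net.JoinHostPort(conf.Servers[0], conf.Port))
		if err != nil || r.Rcode != dns.RcodeSuccess {
			continue
		}
		fmt.Println(name, "->", len(r.Answer), "answers")
		return
	}
	log.Println("no answer found")
}
```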
+func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { + dns.SetReply(request) + dns.Rcode = rcode + return dns +} + +// SetRcodeFormatError creates a message with FormError set. +func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { + dns.Rcode = RcodeFormatError + dns.Opcode = OpcodeQuery + dns.Response = true + dns.Authoritative = false + dns.Id = request.Id + return dns +} + +// SetUpdate makes the message a dynamic update message. It +// sets the ZONE section to: z, TypeSOA, ClassINET. +func (dns *Msg) SetUpdate(z string) *Msg { + dns.Id = Id() + dns.Response = false + dns.Opcode = OpcodeUpdate + dns.Compress = false // BIND9 cannot handle compression + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetIxfr creates message for requesting an IXFR. +func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Ns = make([]RR, 1) + s := new(SOA) + s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} + s.Serial = serial + s.Ns = ns + s.Mbox = mbox + dns.Question[0] = Question{z, TypeIXFR, ClassINET} + dns.Ns[0] = s + return dns +} + +// SetAxfr creates message for requesting an AXFR. +func (dns *Msg) SetAxfr(z string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeAXFR, ClassINET} + return dns +} + +// SetTsig appends a TSIG RR to the message. +// This is only a skeleton TSIG RR that is added as the last RR in the +// additional section. The Tsig is calculated when the message is being send. +func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { + t := new(TSIG) + t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} + t.Algorithm = algo + t.Fudge = fudge + t.TimeSigned = uint64(timesigned) + t.OrigId = dns.Id + dns.Extra = append(dns.Extra, t) + return dns +} + +// SetEdns0 appends a EDNS0 OPT RR to the message. +// TSIG should always the last RR in a message. +func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { + e := new(OPT) + e.Hdr.Name = "." + e.Hdr.Rrtype = TypeOPT + e.SetUDPSize(udpsize) + if do { + e.SetDo() + } + dns.Extra = append(dns.Extra, e) + return dns +} + +// IsTsig checks if the message has a TSIG record as the last record +// in the additional section. It returns the TSIG record found or nil. +func (dns *Msg) IsTsig() *TSIG { + if len(dns.Extra) > 0 { + if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { + return dns.Extra[len(dns.Extra)-1].(*TSIG) + } + } + return nil +} + +// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 +// record in the additional section will do. It returns the OPT record +// found or nil. +func (dns *Msg) IsEdns0() *OPT { + // RFC 6891, Section 6.1.1 allows the OPT record to appear + // anywhere in the additional record section, but it's usually at + // the end so start there. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + return dns.Extra[i].(*OPT) + } + } + return nil +} + +// popEdns0 is like IsEdns0, but it removes the record from the message. +func (dns *Msg) popEdns0() *OPT { + // RFC 6891, Section 6.1.1 allows the OPT record to appear + // anywhere in the additional record section, but it's usually at + // the end so start there. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + opt := dns.Extra[i].(*OPT) + dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...) 
+ return opt + } + } + return nil +} + +// IsDomainName checks if s is a valid domain name, it returns the number of +// labels and true, when a domain name is valid. Note that non fully qualified +// domain name is considered valid, in this case the last label is counted in +// the number of labels. When false is returned the number of labels is not +// defined. Also note that this function is extremely liberal; almost any +// string is a valid domain name as the DNS is 8 bit protocol. It checks if each +// label fits in 63 characters and that the entire name will fit into the 255 +// octet wire format limit. +func IsDomainName(s string) (labels int, ok bool) { + // XXX: The logic in this function was copied from packDomainName and + // should be kept in sync with that function. + + const lenmsg = 256 + + if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata. + return 0, false + } + + s = Fqdn(s) + + // Each dot ends a segment of the name. Except for escaped dots (\.), which + // are normal dots. + + var ( + off int + begin int + wasDot bool + ) + for i := 0; i < len(s); i++ { + switch s[i] { + case '\\': + if off+1 > lenmsg { + return labels, false + } + + // check for \DDD + if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + i += 3 + begin += 3 + } else { + i++ + begin++ + } + + wasDot = false + case '.': + if wasDot { + // two dots back to back is not legal + return labels, false + } + wasDot = true + + labelLen := i - begin + if labelLen >= 1<<6 { // top two bits of length must be clear + return labels, false + } + + // off can already (we're in a loop) be bigger than lenmsg + // this happens when a name isn't fully qualified + off += 1 + labelLen + if off > lenmsg { + return labels, false + } + + labels++ + begin = i + 1 + default: + wasDot = false + } + } + + return labels, true +} + +// IsSubDomain checks if child is indeed a child of the parent. If child and parent +// are the same domain true is returned as well. +func IsSubDomain(parent, child string) bool { + // Entire child is contained in parent + return CompareDomainName(parent, child) == CountLabel(parent) +} + +// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. +// The checking is performed on the binary payload. +func IsMsg(buf []byte) error { + // Header + if len(buf) < headerSize { + return errors.New("dns: bad message header") + } + // Header: Opcode + // TODO(miek): more checks here, e.g. check all header bits. + return nil +} + +// IsFqdn checks if a domain name is fully qualified. +func IsFqdn(s string) bool { + s2 := strings.TrimSuffix(s, ".") + if s == s2 { + return false + } + + i := strings.LastIndexFunc(s2, func(r rune) bool { + return r != '\\' + }) + + // Test whether we have an even number of escape sequences before + // the dot or none. + return (len(s2)-i)%2 != 0 +} + +// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. Returns true +// if the RR set is valid, otherwise false. 
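The name helpers above (IsDomainName, IsFqdn, IsSubDomain) all expect fully qualified names, which Fqdn produces. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	name := "www.example.org" // not fully qualified yet

	fmt.Println(dns.IsFqdn(name)) // false
	fqdn := dns.Fqdn(name)        // "www.example.org."
	fmt.Println(dns.IsFqdn(fqdn)) // true

	labels, ok := dns.IsDomainName(fqdn)
	fmt.Println(labels, ok) // 3 true

	fmt.Println(dns.IsSubDomain("example.org.", fqdn)) // true
	fmt.Println(dns.CountLabel(fqdn))                  // 3
}
```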
+func IsRRset(rrset []RR) bool { + if len(rrset) == 0 { + return false + } + if len(rrset) == 1 { + return true + } + rrHeader := rrset[0].Header() + rrType := rrHeader.Rrtype + rrClass := rrHeader.Class + rrName := rrHeader.Name + + for _, rr := range rrset[1:] { + curRRHeader := rr.Header() + if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + // Mismatch between the records, so this is not a valid rrset for + //signing/verifying + return false + } + } + + return true +} + +// Fqdn return the fully qualified domain name from s. +// If s is already fully qualified, it behaves as the identity function. +func Fqdn(s string) string { + if IsFqdn(s) { + return s + } + return s + "." +} + +// Copied from the official Go code. + +// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP +// address suitable for reverse DNS (PTR) record lookups or an error if it fails +// to parse the IP address. +func ReverseAddr(addr string) (arpa string, err error) { + ip := net.ParseIP(addr) + if ip == nil { + return "", &Error{err: "unrecognized address: " + addr} + } + if v4 := ip.To4(); v4 != nil { + buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa.")) + // Add it, in reverse, to the buffer + for i := len(v4) - 1; i >= 0; i-- { + buf = strconv.AppendInt(buf, int64(v4[i]), 10) + buf = append(buf, '.') + } + // Append "in-addr.arpa." and return (buf already has the final .) + buf = append(buf, "in-addr.arpa."...) + return string(buf), nil + } + // Must be IPv6 + buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa.")) + // Add it, in reverse, to the buffer + for i := len(ip) - 1; i >= 0; i-- { + v := ip[i] + buf = append(buf, hexDigit[v&0xF]) + buf = append(buf, '.') + buf = append(buf, hexDigit[v>>4]) + buf = append(buf, '.') + } + // Append "ip6.arpa." and return (buf already has the final .) + buf = append(buf, "ip6.arpa."...) + return string(buf), nil +} + +// String returns the string representation for the type t. +func (t Type) String() string { + if t1, ok := TypeToString[uint16(t)]; ok { + return t1 + } + return "TYPE" + strconv.Itoa(int(t)) +} + +// String returns the string representation for the class c. +func (c Class) String() string { + if s, ok := ClassToString[uint16(c)]; ok { + // Only emit mnemonics when they are unambiguous, specically ANY is in both. + if _, ok := StringToType[s]; !ok { + return s + } + } + return "CLASS" + strconv.Itoa(int(c)) +} + +// String returns the string representation for the name n. +func (n Name) String() string { + return sprintName(string(n)) +} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go new file mode 100644 index 00000000..ad83a27e --- /dev/null +++ b/vendor/github.com/miekg/dns/dns.go @@ -0,0 +1,134 @@ +package dns + +import "strconv" + +const ( + year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. + defaultTtl = 3600 // Default internal TTL. + + // DefaultMsgSize is the standard default for messages larger than 512 bytes. + DefaultMsgSize = 4096 + // MinMsgSize is the minimal size of a DNS packet. + MinMsgSize = 512 + // MaxMsgSize is the largest possible DNS packet. + MaxMsgSize = 65535 +) + +// Error represents a DNS error. +type Error struct{ err string } + +func (e *Error) Error() string { + if e == nil { + return "dns: " + } + return "dns: " + e.err +} + +// An RR represents a resource record. +type RR interface { + // Header returns the header of an resource record. The header contains + // everything up to the rdata. 
+ Header() *RR_Header + // String returns the text representation of the resource record. + String() string + + // copy returns a copy of the RR + copy() RR + + // len returns the length (in octets) of the compressed or uncompressed RR in wire format. + // + // If compression is nil, the uncompressed size will be returned, otherwise the compressed + // size will be returned and domain names will be added to the map for future compression. + len(off int, compression map[string]struct{}) int + + // pack packs the records RDATA into wire format. The header will + // already have been packed into msg. + pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) + + // unpack unpacks an RR from wire format. + // + // This will only be called on a new and empty RR type with only the header populated. It + // will only be called if the record's RDATA is non-empty. + unpack(msg []byte, off int) (off1 int, err error) + + // parse parses an RR from zone file format. + // + // This will only be called on a new and empty RR type with only the header populated. + parse(c *zlexer, origin string) *ParseError + + // isDuplicate returns whether the two RRs are duplicates. + isDuplicate(r2 RR) bool +} + +// RR_Header is the header all DNS resource records share. +type RR_Header struct { + Name string `dns:"cdomain-name"` + Rrtype uint16 + Class uint16 + Ttl uint32 + Rdlength uint16 // Length of data after header. +} + +// Header returns itself. This is here to make RR_Header implements the RR interface. +func (h *RR_Header) Header() *RR_Header { return h } + +// Just to implement the RR interface. +func (h *RR_Header) copy() RR { return nil } + +func (h *RR_Header) String() string { + var s string + + if h.Rrtype == TypeOPT { + s = ";" + // and maybe other things + } + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += Class(h.Class).String() + "\t" + s += Type(h.Rrtype).String() + "\t" + return s +} + +func (h *RR_Header) len(off int, compression map[string]struct{}) int { + l := domainNameLen(h.Name, off, compression, true) + l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) + return l +} + +func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + // RR_Header has no RDATA to pack. + return off, nil +} + +func (h *RR_Header) unpack(msg []byte, off int) (int, error) { + panic("dns: internal error: unpack should never be called on RR_Header") +} + +func (h *RR_Header) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on RR_Header") +} + +// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
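ReverseAddr, defined earlier in defaults.go, builds the in-addr.arpa./ip6.arpa. owner names needed for PTR lookups. A sketch (the IP and the resolver address are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	arpa, err := dns.ReverseAddr("192.0.2.10")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(arpa) // 10.2.0.192.in-addr.arpa.

	m := new(dns.Msg)
	m.SetQuestion(arpa, dns.TypePTR)

	c := new(dns.Client)
	r, _, err := c.Exchange(m, "8.8.8.8:53") // placeholder resolver
	if err != nil {
		log.Fatal(err)
	}
	for _, rr := range r.Answer {
		if ptr, ok := rr.(*dns.PTR); ok {
			fmt.Println(ptr.Ptr)
		}
	}
}
```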
+func (rr *RFC3597) ToRFC3597(r RR) error { + buf := make([]byte, Len(r)*2) + headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false) + if err != nil { + return err + } + buf = buf[:off] + + *rr = RFC3597{Hdr: *r.Header()} + rr.Hdr.Rdlength = uint16(off - headerEnd) + + if noRdata(rr.Hdr) { + return nil + } + + _, err = rr.unpack(buf, headerEnd) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go new file mode 100644 index 00000000..12a693f9 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -0,0 +1,794 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + _ "crypto/md5" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "math/big" + "sort" + "strings" + "time" + + "golang.org/x/crypto/ed25519" +) + +// DNSSEC encryption algorithm codes. +const ( + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + ED25519 + ED448 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 +) + +// AlgorithmToString is a map of algorithm IDs to algorithm names. +var AlgorithmToString = map[uint8]string{ + RSAMD5: "RSAMD5", + DH: "DH", + DSA: "DSA", + RSASHA1: "RSASHA1", + DSANSEC3SHA1: "DSA-NSEC3-SHA1", + RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", + RSASHA256: "RSASHA256", + RSASHA512: "RSASHA512", + ECCGOST: "ECC-GOST", + ECDSAP256SHA256: "ECDSAP256SHA256", + ECDSAP384SHA384: "ECDSAP384SHA384", + ED25519: "ED25519", + ED448: "ED448", + INDIRECT: "INDIRECT", + PRIVATEDNS: "PRIVATEDNS", + PRIVATEOID: "PRIVATEOID", +} + +// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. +var AlgorithmToHash = map[uint8]crypto.Hash{ + RSAMD5: crypto.MD5, // Deprecated in RFC 6725 + DSA: crypto.SHA1, + RSASHA1: crypto.SHA1, + RSASHA1NSEC3SHA1: crypto.SHA1, + RSASHA256: crypto.SHA256, + ECDSAP256SHA256: crypto.SHA256, + ECDSAP384SHA384: crypto.SHA384, + RSASHA512: crypto.SHA512, + ED25519: crypto.Hash(0), +} + +// DNSSEC hashing algorithm codes. +const ( + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental +) + +// HashToString is a map of hash IDs to names. +var HashToString = map[uint8]string{ + SHA1: "SHA1", + SHA256: "SHA256", + GOST94: "GOST94", + SHA384: "SHA384", + SHA512: "SHA512", +} + +// DNSKEY flag values. +const ( + SEP = 1 + REVOKE = 1 << 7 + ZONE = 1 << 8 +) + +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. +type rrsigWireFmt struct { + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + /* No Signature */ +} + +// Used for converting DNSKEY's rdata to wirefmt. +type dnskeyWireFmt struct { + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` + /* Nothing is left out */ +} + +func divRoundUp(a, b int) int { + return (a + b - 1) / b +} + +// KeyTag calculates the keytag (or key-id) of the DNSKEY. 
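KeyTag, documented above, and ToDS are typically used together when producing the DS record for a zone's key-signing key. A sketch that generates a throwaway ECDSA key purely for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE | dns.SEP, // 257: a key-signing key
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	if _, err := key.Generate(256); err != nil { // fills in key.PublicKey
		log.Fatal(err)
	}

	fmt.Println("key tag:", key.KeyTag())
	if ds := key.ToDS(dns.SHA256); ds != nil {
		fmt.Println(ds.String()) // the DS record to hand to the parent zone
	}
}
```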
+func (k *DNSKEY) KeyTag() uint16 { + if k == nil { + return 0 + } + var keytag int + switch k.Algorithm { + case RSAMD5: + // Look at the bottom two bytes of the modules, which the last + // item in the pubkey. + // This algorithm has been deprecated, but keep this key-tag calculation. + modulus, _ := fromBase64([]byte(k.PublicKey)) + if len(modulus) > 1 { + x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) + keytag = int(x) + } + default: + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return 0 + } + wire = wire[:n] + for i, v := range wire { + if i&1 != 0 { + keytag += int(v) // must be larger than uint32 + } else { + keytag += int(v) << 8 + } + } + keytag += keytag >> 16 & 0xFFFF + keytag &= 0xFFFF + } + return uint16(keytag) +} + +// ToDS converts a DNSKEY record to a DS record. +func (k *DNSKEY) ToDS(h uint8) *DS { + if k == nil { + return nil + } + ds := new(DS) + ds.Hdr.Name = k.Hdr.Name + ds.Hdr.Class = k.Hdr.Class + ds.Hdr.Rrtype = TypeDS + ds.Hdr.Ttl = k.Hdr.Ttl + ds.Algorithm = k.Algorithm + ds.DigestType = h + ds.KeyTag = k.KeyTag() + + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return nil + } + wire = wire[:n] + + owner := make([]byte, 255) + off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) + if err1 != nil { + return nil + } + owner = owner[:off] + // RFC4034: + // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); + // "|" denotes concatenation + // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. + + var hash crypto.Hash + switch h { + case SHA1: + hash = crypto.SHA1 + case SHA256: + hash = crypto.SHA256 + case SHA384: + hash = crypto.SHA384 + case SHA512: + hash = crypto.SHA512 + default: + return nil + } + + s := hash.New() + s.Write(owner) + s.Write(wire) + ds.Digest = hex.EncodeToString(s.Sum(nil)) + return ds +} + +// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. +func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { + c := &CDNSKEY{DNSKEY: *k} + c.Hdr = k.Hdr + c.Hdr.Rrtype = TypeCDNSKEY + return c +} + +// ToCDS converts a DS record to a CDS record. +func (d *DS) ToCDS() *CDS { + c := &CDS{DS: *d} + c.Hdr = d.Hdr + c.Hdr.Rrtype = TypeCDS + return c +} + +// Sign signs an RRSet. The signature needs to be filled in with the values: +// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied +// from the RRset. Sign returns a non-nill error when the signing went OK. +// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non +// zero, it is used as-is, otherwise the TTL of the RRset is used as the +// OrigTTL. 
+func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + h0 := rrset[0].Header() + rr.Hdr.Rrtype = TypeRRSIG + rr.Hdr.Name = h0.Name + rr.Hdr.Class = h0.Class + if rr.OrigTtl == 0 { // If set don't override + rr.OrigTtl = h0.Ttl + } + rr.TypeCovered = h0.Rrtype + rr.Labels = uint8(CountLabel(h0.Name)) + + if strings.HasPrefix(h0.Name, "*") { + rr.Labels-- // wildcard, remove from label count + } + + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + // For signing, lowercase this name + sigwire.SignerName = strings.ToLower(rr.SignerName) + + // Create the desired binary blob + signdata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signdata) + if err != nil { + return err + } + signdata = signdata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case ED25519: + // ed25519 signs the raw message and performs hashing internally. + // All other supported signature schemes operate over the pre-hashed + // message, and thus ed25519 must be handled separately here. + // + // The raw message is passed directly into sign and crypto.Hash(0) is + // used to signal to the crypto.Signer that the data has not been hashed. + signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + case RSAMD5, DSA, DSANSEC3SHA1: + // See RFC 6944. + return ErrAlg + default: + h := hash.New() + h.Write(signdata) + h.Write(wire) + + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + } + + return nil +} + +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { + signature, err := k.Sign(rand.Reader, hashed, hash) + if err != nil { + return nil, err + } + + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + return signature, nil + + case ECDSAP256SHA256, ECDSAP384SHA384: + ecdsaSignature := &struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { + return nil, err + } + + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + + signature := intToBytes(ecdsaSignature.R, intlen) + signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) + return signature, nil + + // There is no defined interface for what a DSA backed crypto.Signer returns + case DSA, DSANSEC3SHA1: + // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + // signature := []byte{byte(t)} + // signature = append(signature, intToBytes(r1, 20)...) + // signature = append(signature, intToBytes(s1, 20)...) + // rr.Signature = signature + + case ED25519: + return signature, nil + } + + return nil, ErrAlg +} + +// Verify validates an RRSet with the signature and key. This is only the +// cryptographic test, the signature validity period must be checked separately. 
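Sign and Verify are the two halves of the cryptographic check described above. A self-contained round trip over a single A record, using a freshly generated ECDSA P-256 key and made-up names:

```go
package main

import (
	"crypto/ecdsa"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE,
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	priv, err := key.Generate(256)
	if err != nil {
		log.Fatal(err)
	}

	a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}
	rrset := []dns.RR{a}

	sig := &dns.RRSIG{
		Hdr:        dns.RR_Header{Name: "www.example.org.", Rrtype: dns.TypeRRSIG, Class: dns.ClassINET, Ttl: 3600},
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  key.Algorithm,
		Inception:  uint32(time.Now().Add(-time.Hour).Unix()),
		Expiration: uint32(time.Now().Add(24 * time.Hour).Unix()),
	}
	if err := sig.Sign(priv.(*ecdsa.PrivateKey), rrset); err != nil { // *ecdsa.PrivateKey implements crypto.Signer
		log.Fatal(err)
	}

	if err := sig.Verify(key, rrset); err != nil {
		log.Fatal("verify failed: ", err)
	}
	log.Println("RRSIG verifies and is valid now:", sig.ValidityPeriod(time.Time{}))
}
```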
+// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. +func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { + // First the easy checks + if !IsRRset(rrset) { + return ErrRRset + } + if rr.KeyTag != k.KeyTag() { + return ErrKey + } + if rr.Hdr.Class != k.Hdr.Class { + return ErrKey + } + if rr.Algorithm != k.Algorithm { + return ErrKey + } + if !strings.EqualFold(rr.SignerName, k.Hdr.Name) { + return ErrKey + } + if k.Protocol != 3 { + return ErrKey + } + + // IsRRset checked that we have at least one RR and that the RRs in + // the set have consistent type, class, and name. Also check that type and + // class matches the RRSIG record. + if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered { + return ErrRRset + } + + // RFC 4035 5.3.2. Reconstructing the Signed Data + // Copy the sig, except the rrsig data + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + sigwire.SignerName = strings.ToLower(rr.SignerName) + // Create the desired binary blob + signeddata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signeddata) + if err != nil { + return err + } + signeddata = signeddata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + + sigbuf := rr.sigBuf() // Get the binary signature data + if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // TODO(miek) + // remove the domain name and assume its ours? + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: + // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? + pubkey := k.publicKeyRSA() // Get the key + if pubkey == nil { + return ErrKey + } + + h := hash.New() + h.Write(signeddata) + h.Write(wire) + return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) + + case ECDSAP256SHA256, ECDSAP384SHA384: + pubkey := k.publicKeyECDSA() + if pubkey == nil { + return ErrKey + } + + // Split sigbuf into the r and s coordinates + r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) + s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) + + h := hash.New() + h.Write(signeddata) + h.Write(wire) + if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { + return nil + } + return ErrSig + + case ED25519: + pubkey := k.publicKeyED25519() + if pubkey == nil { + return ErrKey + } + + if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) { + return nil + } + return ErrSig + + default: + return ErrAlg + } +} + +// ValidityPeriod uses RFC1982 serial arithmetic to calculate +// if a signature period is valid. If t is the zero time, the +// current time is taken other t is. Returns true if the signature +// is valid at the given time, otherwise returns false. +func (rr *RRSIG) ValidityPeriod(t time.Time) bool { + var utc int64 + if t.IsZero() { + utc = time.Now().UTC().Unix() + } else { + utc = t.UTC().Unix() + } + modi := (int64(rr.Inception) - utc) / year68 + mode := (int64(rr.Expiration) - utc) / year68 + ti := int64(rr.Inception) + modi*year68 + te := int64(rr.Expiration) + mode*year68 + return ti <= utc && utc <= te +} + +// Return the signatures base64 encodedig sigdata as a byte slice. 
+func (rr *RRSIG) sigBuf() []byte { + sigbuf, err := fromBase64([]byte(rr.Signature)) + if err != nil { + return nil + } + return sigbuf +} + +// publicKeyRSA returns the RSA public key from a DNSKEY record. +func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + + if len(keybuf) < 1+1+64 { + // Exponent must be at least 1 byte and modulus at least 64 + return nil + } + + // RFC 2537/3110, section 2. RSA Public KEY Resource Records + // Length is in the 0th byte, unless its zero, then it + // it in bytes 1 and 2 and its a 16 bit number + explen := uint16(keybuf[0]) + keyoff := 1 + if explen == 0 { + explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) + keyoff = 3 + } + + if explen > 4 || explen == 0 || keybuf[keyoff] == 0 { + // Exponent larger than supported by the crypto package, + // empty, or contains prohibited leading zero. + return nil + } + + modoff := keyoff + int(explen) + modlen := len(keybuf) - modoff + if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 { + // Modulus is too small, large, or contains prohibited leading zero. + return nil + } + + pubkey := new(rsa.PublicKey) + + var expo uint64 + // The exponent of length explen is between keyoff and modoff. + for _, v := range keybuf[keyoff:modoff] { + expo <<= 8 + expo |= uint64(v) + } + if expo > 1<<31-1 { + // Larger exponent than supported by the crypto package. + return nil + } + + pubkey.E = int(expo) + pubkey.N = new(big.Int).SetBytes(keybuf[modoff:]) + return pubkey +} + +// publicKeyECDSA returns the Curve public key from the DNSKEY record. +func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + pubkey := new(ecdsa.PublicKey) + switch k.Algorithm { + case ECDSAP256SHA256: + pubkey.Curve = elliptic.P256() + if len(keybuf) != 64 { + // wrongly encoded key + return nil + } + case ECDSAP384SHA384: + pubkey.Curve = elliptic.P384() + if len(keybuf) != 96 { + // Wrongly encoded key + return nil + } + } + pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2]) + pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:]) + return pubkey +} + +func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) < 22 { + return nil + } + t, keybuf := int(keybuf[0]), keybuf[1:] + size := 64 + t*8 + q, keybuf := keybuf[:20], keybuf[20:] + if len(keybuf) != 3*size { + return nil + } + p, keybuf := keybuf[:size], keybuf[size:] + g, y := keybuf[:size], keybuf[size:] + pubkey := new(dsa.PublicKey) + pubkey.Parameters.Q = new(big.Int).SetBytes(q) + pubkey.Parameters.P = new(big.Int).SetBytes(p) + pubkey.Parameters.G = new(big.Int).SetBytes(g) + pubkey.Y = new(big.Int).SetBytes(y) + return pubkey +} + +func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) != ed25519.PublicKeySize { + return nil + } + return keybuf +} + +type wireSlice [][]byte + +func (p wireSlice) Len() int { return len(p) } +func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p wireSlice) Less(i, j int) bool { + _, ioff, _ := UnpackDomainName(p[i], 0) + _, joff, _ := UnpackDomainName(p[j], 0) + return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 +} + +// Return the raw signature data. 
+func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { + wires := make(wireSlice, len(rrset)) + for i, r := range rrset { + r1 := r.copy() + h := r1.Header() + h.Ttl = s.OrigTtl + labels := SplitDomainName(h.Name) + // 6.2. Canonical RR Form. (4) - wildcards + if len(labels) > int(s.Labels) { + // Wildcard + h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." + } + // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase + h.Name = strings.ToLower(h.Name) + // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. + // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, + // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, + // SRV, DNAME, A6 + // + // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): + // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record + // that needs conversion to lowercase, and twice at that. Since HINFO + // records contain no domain names, they are not subject to case + // conversion. + switch x := r1.(type) { + case *NS: + x.Ns = strings.ToLower(x.Ns) + case *MD: + x.Md = strings.ToLower(x.Md) + case *MF: + x.Mf = strings.ToLower(x.Mf) + case *CNAME: + x.Target = strings.ToLower(x.Target) + case *SOA: + x.Ns = strings.ToLower(x.Ns) + x.Mbox = strings.ToLower(x.Mbox) + case *MB: + x.Mb = strings.ToLower(x.Mb) + case *MG: + x.Mg = strings.ToLower(x.Mg) + case *MR: + x.Mr = strings.ToLower(x.Mr) + case *PTR: + x.Ptr = strings.ToLower(x.Ptr) + case *MINFO: + x.Rmail = strings.ToLower(x.Rmail) + x.Email = strings.ToLower(x.Email) + case *MX: + x.Mx = strings.ToLower(x.Mx) + case *RP: + x.Mbox = strings.ToLower(x.Mbox) + x.Txt = strings.ToLower(x.Txt) + case *AFSDB: + x.Hostname = strings.ToLower(x.Hostname) + case *RT: + x.Host = strings.ToLower(x.Host) + case *SIG: + x.SignerName = strings.ToLower(x.SignerName) + case *PX: + x.Map822 = strings.ToLower(x.Map822) + x.Mapx400 = strings.ToLower(x.Mapx400) + case *NAPTR: + x.Replacement = strings.ToLower(x.Replacement) + case *KX: + x.Exchanger = strings.ToLower(x.Exchanger) + case *SRV: + x.Target = strings.ToLower(x.Target) + case *DNAME: + x.Target = strings.ToLower(x.Target) + } + // 6.2. Canonical RR Form. (5) - origTTL + wire := make([]byte, Len(r1)+1) // +1 to be safe(r) + off, err1 := PackRR(r1, wire, 0, nil, false) + if err1 != nil { + return nil, err1 + } + wire = wire[:off] + wires[i] = wire + } + sort.Sort(wires) + for i, wire := range wires { + if i > 0 && bytes.Equal(wire, wires[i-1]) { + continue + } + buf = append(buf, wire...) 
+ } + return buf, nil +} + +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go RRSIG packing + off, err := packUint16(sw.TypeCovered, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(sw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(sw.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(sw.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(sw.SignerName, msg, off, nil, false) + if err != nil { + return off, err + } + return off, nil +} + +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { + // copied from zmsg.go DNSKEY packing + off, err := packUint16(dw.Flags, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(dw.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(dw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(dw.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 00000000..60737e5b --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,140 @@ +package dns + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" + + "golang.org/x/crypto/ed25519" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. +// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. 
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { + switch k.Algorithm { + case RSAMD5, DSA, DSANSEC3SHA1: + return nil, ErrAlg + case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + case ED25519: + if bits != 256 { + return nil, ErrKeySize + } + } + + switch k.Algorithm { + case RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return priv, nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch k.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return priv, nil + case ED25519: + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyED25519(pub) + return priv, nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for Ed25519 +func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool { + if _K == nil { + return false + } + k.PublicKey = toBase64(_K) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)).Bytes() + if len(i) < 256 { + buf = make([]byte, 1, 1+len(i)) + buf[0] = uint8(len(i)) + } else { + buf = make([]byte, 3, 3+len(i)) + buf[0] = 0 + buf[1] = uint8(len(i) >> 8) + buf[2] = uint8(len(i)) + } + buf = append(buf, i...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) + return buf +} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go new file mode 100644 index 00000000..0e6f3201 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -0,0 +1,322 @@ +package dns + +import ( + "bufio" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "io" + "math/big" + "strconv" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// NewPrivateKey returns a PrivateKey by parsing the string s. +// s should be in the same form of the BIND private key files. 
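Generate and NewPrivateKey complement each other: one creates a key pair, the other reads it back from BIND's private-key text format, which PrivateKeyString (from dnssec_privkey.go in this same vendor drop) produces. A sketch using Ed25519:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE | dns.SEP,
		Protocol:  3,
		Algorithm: dns.ED25519,
	}
	priv, err := key.Generate(256) // Ed25519 keys are always 256 bits here
	if err != nil {
		log.Fatal(err)
	}

	// Serialize to the BIND "Private-key-format" text and read it back in.
	text := key.PrivateKeyString(priv)
	restored, err := key.NewPrivateKey(text)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("restored key type: %T", restored)

	fmt.Println(key.String()) // the public DNSKEY record in zone-file format
}
```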
+func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { + if s == "" || s[len(s)-1] != '\n' { // We need a closing newline + return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") + } + return k.ReadPrivateKey(strings.NewReader(s), "") +} + +// ReadPrivateKey reads a private key from the io.Reader q. The string file is +// only used in error reporting. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { + m, err := parseKey(q, file) + if m == nil { + return nil, err + } + if _, ok := m["private-key-format"]; !ok { + return nil, ErrPrivKey + } + if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { + return nil, ErrPrivKey + } + // TODO(mg): check if the pubkey matches the private key + algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) + if err != nil { + return nil, ErrPrivKey + } + switch uint8(algo) { + case RSAMD5, DSA, DSANSEC3SHA1: + return nil, ErrAlg + case RSASHA1: + fallthrough + case RSASHA1NSEC3SHA1: + fallthrough + case RSASHA256: + fallthrough + case RSASHA512: + priv, err := readPrivateKeyRSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ECCGOST: + return nil, ErrPrivKey + case ECDSAP256SHA256: + fallthrough + case ECDSAP384SHA384: + priv, err := readPrivateKeyECDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ED25519: + return readPrivateKeyED25519(m) + default: + return nil, ErrPrivKey + } +} + +// Read a private key (file) string and create a public key. Return the private key. 
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { + p := new(rsa.PrivateKey) + p.Primes = []*big.Int{nil, nil} + for k, v := range m { + switch k { + case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + switch k { + case "modulus": + p.PublicKey.N = new(big.Int).SetBytes(v1) + case "publicexponent": + i := new(big.Int).SetBytes(v1) + p.PublicKey.E = int(i.Int64()) // int64 should be large enough + case "privateexponent": + p.D = new(big.Int).SetBytes(v1) + case "prime1": + p.Primes[0] = new(big.Int).SetBytes(v1) + case "prime2": + p.Primes[1] = new(big.Int).SetBytes(v1) + } + case "exponent1", "exponent2", "coefficient": + // not used in Go (yet) + case "created", "publish", "activate": + // not used in Go (yet) + } + } + return p, nil +} + +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { + p := new(ecdsa.PrivateKey) + p.D = new(big.Int) + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.D.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) { + var p ed25519.PrivateKey + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + p1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + if len(p1) != ed25519.SeedSize { + return nil, ErrPrivKey + } + p = ed25519.NewKeyFromSeed(p1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +// parseKey reads a private key from r. It returns a map[string]string, +// with the key-value pairs, or an error when the file is not correct. +func parseKey(r io.Reader, file string) (map[string]string, error) { + m := make(map[string]string) + var k string + + c := newKLexer(r) + + for l, ok := c.Next(); ok; l, ok = c.Next() { + // It should alternate + switch l.value { + case zKey: + k = l.token + case zValue: + if k == "" { + return nil, &ParseError{file, "no private key seen", l} + } + + m[strings.ToLower(k)] = l.token + k = "" + } + } + + // Surface any read errors from r. + if err := c.Err(); err != nil { + return nil, &ParseError{file: file, err: err.Error()} + } + + return m, nil +} + +type klexer struct { + br io.ByteReader + + readErr error + + line int + column int + + key bool + + eol bool // end-of-line +} + +func newKLexer(r io.Reader) *klexer { + br, ok := r.(io.ByteReader) + if !ok { + br = bufio.NewReaderSize(r, 1024) + } + + return &klexer{ + br: br, + + line: 1, + + key: true, + } +} + +func (kl *klexer) Err() error { + if kl.readErr == io.EOF { + return nil + } + + return kl.readErr +} + +// readByte returns the next byte from the input +func (kl *klexer) readByte() (byte, bool) { + if kl.readErr != nil { + return 0, false + } + + c, err := kl.br.ReadByte() + if err != nil { + kl.readErr = err + return 0, false + } + + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. 
+ if kl.eol { + kl.line++ + kl.column = 0 + kl.eol = false + } + + if c == '\n' { + kl.eol = true + } else { + kl.column++ + } + + return c, true +} + +func (kl *klexer) Next() (lex, bool) { + var ( + l lex + + str strings.Builder + + commt bool + ) + + for x, ok := kl.readByte(); ok; x, ok = kl.readByte() { + l.line, l.column = kl.line, kl.column + + switch x { + case ':': + if commt || !kl.key { + break + } + + kl.key = false + + // Next token is a space, eat it + kl.readByte() + + l.value = zKey + l.token = str.String() + return l, true + case ';': + commt = true + case '\n': + if commt { + // Reset a comment + commt = false + } + + if kl.key && str.Len() == 0 { + // ignore empty lines + break + } + + kl.key = true + + l.value = zValue + l.token = str.String() + return l, true + default: + if commt { + break + } + + str.WriteByte(x) + } + } + + if kl.readErr != nil && kl.readErr != io.EOF { + // Don't return any tokens after a read error occurs. + return lex{value: zEOF}, false + } + + if str.Len() > 0 { + // Send remainder + l.value = zValue + l.token = str.String() + return l, true + } + + return lex{value: zEOF}, false +} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 00000000..4493c9d5 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,94 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strconv" + + "golang.org/x/crypto/ed25519" +) + +const format = "Private-key-format: v1.3\n" + +var bigIntOne = big.NewInt(1) + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). +// It needs some info from the key (the algorithm), so its a method of the DNSKEY +// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey +func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { + algorithm := strconv.Itoa(int(r.Algorithm)) + algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" + + switch p := p.(type) { + case *rsa.PrivateKey: + modulus := toBase64(p.PublicKey.N.Bytes()) + e := big.NewInt(int64(p.PublicKey.E)) + publicExponent := toBase64(e.Bytes()) + privateExponent := toBase64(p.D.Bytes()) + prime1 := toBase64(p.Primes[0].Bytes()) + prime2 := toBase64(p.Primes[1].Bytes()) + // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm + // and from: http://code.google.com/p/go/issues/detail?id=987 + p1 := new(big.Int).Sub(p.Primes[0], bigIntOne) + q1 := new(big.Int).Sub(p.Primes[1], bigIntOne) + exp1 := new(big.Int).Mod(p.D, p1) + exp2 := new(big.Int).Mod(p.D, q1) + coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0]) + + exponent1 := toBase64(exp1.Bytes()) + exponent2 := toBase64(exp2.Bytes()) + coefficient := toBase64(coeff.Bytes()) + + return format + + "Algorithm: " + algorithm + "\n" + + "Modulus: " + modulus + "\n" + + "PublicExponent: " + publicExponent + "\n" + + "PrivateExponent: " + privateExponent + "\n" + + "Prime1: " + prime1 + "\n" + + "Prime2: " + prime2 + "\n" + + "Exponent1: " + exponent1 + "\n" + + "Exponent2: " + exponent2 + "\n" + + "Coefficient: " + coefficient + "\n" + + case *ecdsa.PrivateKey: + var intlen int + switch r.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + private := toBase64(intToBytes(p.D, intlen)) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + 
private + "\n" + + case *dsa.PrivateKey: + T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) + prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) + subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) + base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) + priv := toBase64(intToBytes(p.X, 20)) + pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) + return format + + "Algorithm: " + algorithm + "\n" + + "Prime(p): " + prime + "\n" + + "Subprime(q): " + subprime + "\n" + + "Base(g): " + base + "\n" + + "Private_value(x): " + priv + "\n" + + "Public_value(y): " + pub + "\n" + + case ed25519.PrivateKey: + private := toBase64(p.Seed()) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + default: + return "" + } +} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go new file mode 100644 index 00000000..3318b77e --- /dev/null +++ b/vendor/github.com/miekg/dns/doc.go @@ -0,0 +1,268 @@ +/* +Package dns implements a full featured interface to the Domain Name System. +Both server- and client-side programming is supported. The package allows +complete control over what is sent out to the DNS. The API follows the +less-is-more principle, by presenting a small, clean interface. + +It supports (asynchronous) querying/replying, incoming/outgoing zone transfers, +TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. + +Note that domain names MUST be fully qualified before sending them, unqualified +names in a message will result in a packing failure. + +Resource records are native types. They are not stored in wire format. Basic +usage pattern for creating a new resource record: + + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." + +Or directly from a string: + + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + +Or when the default origin (.) and TTL (3600) and class (IN) suit you: + + mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") + +Or even: + + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + +In the DNS messages are exchanged, these messages contain resource records +(sets). Use pattern for creating a message: + + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + +Or when not certain if the domain name is fully qualified: + + m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) + +The message m is now a message with the question section set to ask the MX +records for the miek.nl. zone. + +The following is slightly more verbose, but more flexible: + + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + +After creating a message it can be sent. Basic use pattern for synchronous +querying the DNS at a server configured on 127.0.0.1 and port 53: + + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + +Suppressing multiple outstanding queries (with the same question, type and +class) is as easy as setting: + + c.SingleInflight = true + +More advanced options are available using a net.Dialer and the corresponding API. 
+For example it is possible to set a timeout, or to specify a source IP address +and port to use for the connection: + + c := new(dns.Client) + laddr := net.UDPAddr{ + IP: net.ParseIP("[::1]"), + Port: 12345, + Zone: "", + } + c.Dialer := &net.Dialer{ + Timeout: 200 * time.Millisecond, + LocalAddr: &laddr, + } + in, rtt, err := c.Exchange(m1, "8.8.8.8:53") + +If these "advanced" features are not needed, a simple UDP query can be sent, +with: + + in, err := dns.Exchange(m1, "127.0.0.1:53") + +When this functions returns you will get DNS message. A DNS message consists +out of four sections. +The question section: in.Question, the answer section: in.Answer, +the authority section: in.Ns and the additional section: in.Extra. + +Each of these sections (except the Question section) contain a []RR. Basic +use pattern for accessing the rdata of a TXT RR as the first RR in +the Answer section: + + if t, ok := in.Answer[0].(*dns.TXT); ok { + // do something with t.Txt + } + +Domain Name and TXT Character String Representations + +Both domain names and TXT character strings are converted to presentation form +both when unpacked and when converted to strings. + +For TXT character strings, tabs, carriage returns and line feeds will be +converted to \t, \r and \n respectively. Back slashes and quotations marks will +be escaped. Bytes below 32 and above 127 will be converted to \DDD form. + +For domain names, in addition to the above rules brackets, periods, spaces, +semicolons and the at symbol are escaped. + +DNSSEC + +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses +public key cryptography to sign resource records. The public keys are stored in +DNSKEY records and the signatures in RRSIG records. + +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) +bit to a request. + + m := new(dns.Msg) + m.SetEdns0(4096, true) + +Signature generation, signature verification and key generation are all supported. + +DYNAMIC UPDATES + +Dynamic updates reuses the DNS message format, but renames three of the +sections. Question is Zone, Answer is Prerequisite, Authority is Update, only +the Additional is not renamed. See RFC 2136 for the gory details. + +You can set a rather complex set of rules for the existence of absence of +certain resource records or names in a zone to specify if resource records +should be added or removed. The table from RFC 2136 supplemented with the Go +DNS function shows which functions exist to specify the prerequisites. + + 3.2.4 - Table Of Metavalues Used In Prerequisite Section + + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used + +The prerequisite section can also be left empty. If you have decided on the +prerequisites you can tell what RRs should be added or deleted. The next table +shows the options you have and what functions to call. 
+ + 3.4.2.6 - Table Of Metavalues Used In Update Section + + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert + +TRANSACTION SIGNATURE + +An TSIG or transaction signature adds a HMAC TSIG record to each message sent. +The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. + +Basic use pattern when querying with a TSIG name "axfr." (note that these key names +must be fully qualified - as they are domain names) and the base64 secret +"so6ZGir4GPAqINNh9U5c3A==": + +If an incoming message contains a TSIG record it MUST be the last record in +the additional section (RFC2845 3.2). This means that you should make the +call to SetTsig last, right before executing the query. If you make any +changes to the RRset after calling SetTsig() the signature will be incorrect. + + c := new(dns.Client) + c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + ... + // When sending the TSIG RR is calculated and filled in before sending + +When requesting an zone transfer (almost all TSIG usage is when requesting zone +transfers), with TSIG, this is the basic use pattern. In this example we +request an AXFR for miek.nl. with TSIG key named "axfr." and secret +"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54: + + t := new(dns.Transfer) + m := new(dns.Msg) + t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m.SetAxfr("miek.nl.") + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + c, err := t.In(m, "176.58.119.54:53") + for r := range c { ... } + +You can now read the records from the transfer as they come in. Each envelope +is checked with TSIG. If something is not correct an error is returned. + +Basic use pattern validating and replying to a message that has TSIG set. + + server := &dns.Server{Addr: ":53", Net: "udp"} + server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + go server.ListenAndServe() + dns.HandleFunc(".", handleRequest) + + func handleRequest(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + if r.IsTsig() != nil { + if w.TsigStatus() == nil { + // *Msg r has an TSIG record and it was validated + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + } else { + // *Msg r has an TSIG records and it was not valided + } + } + w.WriteMsg(m) + } + +PRIVATE RRS + +RFC 6895 sets aside a range of type codes for private use. This range is 65,280 +- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these +can be used, before requesting an official type code from IANA. + +See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more +information. + +EDNS0 + +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by +RFC 6891. It defines an new RR type, the OPT RR, which is then completely +abused. + +Basic use pattern for creating an (empty) OPT RR: + + o := new(dns.OPT) + o.Hdr.Name = "." // MUST be the root zone, per definition. + o.Hdr.Rrtype = dns.TypeOPT + +The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) interfaces. +Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and +EDNS0_SUBNET (RFC 7871). 
Note that these options may be combined in an OPT RR. +Basic use pattern for a server to check if (and which) options are set: + + // o is a dns.OPT + for _, s := range o.Option { + switch e := s.(type) { + case *dns.EDNS0_NSID: + // do stuff with e.Nsid + case *dns.EDNS0_SUBNET: + // access e.Family, e.Address, etc. + } + } + +SIG(0) + +From RFC 2931: + + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. + +It works like TSIG, except that SIG(0) uses public key cryptography, instead of +the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256, +ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512. + +Signing subsequent messages in multi-message sessions is not implemented. +*/ +package dns diff --git a/vendor/github.com/miekg/dns/duplicate.go b/vendor/github.com/miekg/dns/duplicate.go new file mode 100644 index 00000000..49e6940b --- /dev/null +++ b/vendor/github.com/miekg/dns/duplicate.go @@ -0,0 +1,38 @@ +package dns + +//go:generate go run duplicate_generate.go + +// IsDuplicate checks of r1 and r2 are duplicates of each other, excluding the TTL. +// So this means the header data is equal *and* the RDATA is the same. Return true +// is so, otherwise false. +// It's a protocol violation to have identical RRs in a message. +func IsDuplicate(r1, r2 RR) bool { + // Check whether the record header is identical. + if !r1.Header().isDuplicate(r2.Header()) { + return false + } + + // Check whether the RDATA is identical. + return r1.isDuplicate(r2) +} + +func (r1 *RR_Header) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RR_Header) + if !ok { + return false + } + if r1.Class != r2.Class { + return false + } + if r1.Rrtype != r2.Rrtype { + return false + } + if !isDuplicateName(r1.Name, r2.Name) { + return false + } + // ignore TTL + return true +} + +// isDuplicateName checks if the domain names s1 and s2 are equal. +func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) } diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go new file mode 100644 index 00000000..04808d57 --- /dev/null +++ b/vendor/github.com/miekg/dns/edns.go @@ -0,0 +1,675 @@ +package dns + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "net" + "strconv" +) + +// EDNS0 Option codes. +const ( + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (See RFC 5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828) + EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830) + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) + _DO = 1 << 15 // DNSSEC OK +) + +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. +// See RFC 6891. 
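A short sketch of building an OPT RR by hand with the setters defined below and attaching it to a query; m.SetEdns0(4096, true) is the usual one-line shortcut:

	o := new(dns.OPT)
	o.Hdr.Name = "." // MUST be the root zone
	o.Hdr.Rrtype = dns.TypeOPT
	o.SetUDPSize(4096) // advertise a 4096 byte UDP buffer
	o.SetDo()          // set the DNSSEC OK bit
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	m.Extra = append(m.Extra, o)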
+type OPT struct { + Hdr RR_Header + Option []EDNS0 `dns:"opt"` +} + +func (rr *OPT) String() string { + s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " + if rr.Do() { + s += "flags: do; " + } else { + s += "flags: ; " + } + s += "udp: " + strconv.Itoa(int(rr.UDPSize())) + + for _, o := range rr.Option { + switch o.(type) { + case *EDNS0_NSID: + s += "\n; NSID: " + o.String() + h, e := o.pack() + var r string + if e == nil { + for _, c := range h { + r += "(" + string(c) + ")" + } + s += " " + r + } + case *EDNS0_SUBNET: + s += "\n; SUBNET: " + o.String() + case *EDNS0_COOKIE: + s += "\n; COOKIE: " + o.String() + case *EDNS0_UL: + s += "\n; UPDATE LEASE: " + o.String() + case *EDNS0_LLQ: + s += "\n; LONG LIVED QUERIES: " + o.String() + case *EDNS0_DAU: + s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() + case *EDNS0_DHU: + s += "\n; DS HASH UNDERSTOOD: " + o.String() + case *EDNS0_N3U: + s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() + case *EDNS0_LOCAL: + s += "\n; LOCAL OPT: " + o.String() + case *EDNS0_PADDING: + s += "\n; PADDING: " + o.String() + } + } + return s +} + +func (rr *OPT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, o := range rr.Option { + l += 4 // Account for 2-byte option code and 2-byte option length. + lo, _ := o.pack() + l += len(lo) + } + return l +} + +func (rr *OPT) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on OPT") +} + +func (r1 *OPT) isDuplicate(r2 RR) bool { return false } + +// return the old value -> delete SetVersion? + +// Version returns the EDNS version used. Only zero is defined. +func (rr *OPT) Version() uint8 { + return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16) +} + +// SetVersion sets the version of EDNS. This is usually zero. +func (rr *OPT) SetVersion(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16 +} + +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). +func (rr *OPT) ExtendedRcode() int { + return int(rr.Hdr.Ttl&0xFF000000>>24) << 4 +} + +// SetExtendedRcode sets the EDNS extended RCODE field. +// +// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0. +func (rr *OPT) SetExtendedRcode(v uint16) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24 +} + +// UDPSize returns the UDP buffer size. +func (rr *OPT) UDPSize() uint16 { + return rr.Hdr.Class +} + +// SetUDPSize sets the UDP buffer size. +func (rr *OPT) SetUDPSize(size uint16) { + rr.Hdr.Class = size +} + +// Do returns the value of the DO (DNSSEC OK) bit. +func (rr *OPT) Do() bool { + return rr.Hdr.Ttl&_DO == _DO +} + +// SetDo sets the DO (DNSSEC OK) bit. +// If we pass an argument, set the DO bit to that value. +// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. +func (rr *OPT) SetDo(do ...bool) { + if len(do) == 1 { + if do[0] { + rr.Hdr.Ttl |= _DO + } else { + rr.Hdr.Ttl &^= _DO + } + } else { + rr.Hdr.Ttl |= _DO + } +} + +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. +type EDNS0 interface { + // Option returns the option code for the option. + Option() uint16 + // pack returns the bytes of the option data. + pack() ([]byte, error) + // unpack sets the data as found in the buffer. Is also sets + // the length of the slice as the length of the option data. + unpack([]byte) error + // String returns the string representation of the option. 
+ String() string + // copy returns a deep-copy of the option. + copy() EDNS0 +} + +// EDNS0_NSID option is used to retrieve a nameserver +// identifier. When sending a request Nsid must be set to the empty string +// The identifier is an opaque string encoded as hex. +// Basic use pattern for creating an nsid option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_NSID) +// e.Code = dns.EDNS0NSID +// e.Nsid = "AA" +// o.Option = append(o.Option, e) +type EDNS0_NSID struct { + Code uint16 // Always EDNS0NSID + Nsid string // This string needs to be hex encoded +} + +func (e *EDNS0_NSID) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Nsid) + if err != nil { + return nil, err + } + return h, nil +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } +func (e *EDNS0_NSID) String() string { return e.Nsid } +func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} } + +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver +// an idea of where the client lives. See RFC 7871. It can then give back a different +// answer depending on the location or network topology. +// Basic use pattern for creating an subnet option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_SUBNET) +// e.Code = dns.EDNS0SUBNET +// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 +// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 +// e.SourceScope = 0 +// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 +// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 +// o.Option = append(o.Option, e) +// +// This code will parse all the available bits when unpacking (up to optlen). +// When packing it will apply SourceNetmask. If you need more advanced logic, +// patches welcome and good luck. +type EDNS0_SUBNET struct { + Code uint16 // Always EDNS0SUBNET + Family uint16 // 1 for IP, 2 for IP6 + SourceNetmask uint8 + SourceScope uint8 + Address net.IP +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET } + +func (e *EDNS0_SUBNET) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[0:], e.Family) + b[2] = e.SourceNetmask + b[3] = e.SourceScope + switch e.Family { + case 0: + // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 + // We might don't need to complain either + if e.SourceNetmask != 0 { + return nil, errors.New("dns: bad address family") + } + case 1: + if e.SourceNetmask > net.IPv4len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address.To4()) != net.IPv4len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + case 2: + if e.SourceNetmask > net.IPv6len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address) != net.IPv6len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) 
+ default: + return nil, errors.New("dns: bad address family") + } + return b, nil +} + +func (e *EDNS0_SUBNET) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Family = binary.BigEndian.Uint16(b) + e.SourceNetmask = b[2] + e.SourceScope = b[3] + switch e.Family { + case 0: + // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 + // It's okay to accept such a packet + if e.SourceNetmask != 0 { + return errors.New("dns: bad address family") + } + e.Address = net.IPv4(0, 0, 0, 0) + case 1: + if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { + return errors.New("dns: bad netmask") + } + addr := make(net.IP, net.IPv4len) + copy(addr, b[4:]) + e.Address = addr.To16() + case 2: + if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { + return errors.New("dns: bad netmask") + } + addr := make(net.IP, net.IPv6len) + copy(addr, b[4:]) + e.Address = addr + default: + return errors.New("dns: bad address family") + } + return nil +} + +func (e *EDNS0_SUBNET) String() (s string) { + if e.Address == nil { + s = "" + } else if e.Address.To4() != nil { + s = e.Address.String() + } else { + s = "[" + e.Address.String() + "]" + } + s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) + return +} + +func (e *EDNS0_SUBNET) copy() EDNS0 { + return &EDNS0_SUBNET{ + e.Code, + e.Family, + e.SourceNetmask, + e.SourceScope, + e.Address, + } +} + +// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_COOKIE) +// e.Code = dns.EDNS0COOKIE +// e.Cookie = "24a5ac.." +// o.Option = append(o.Option, e) +// +// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is +// always 8 bytes. It may then optionally be followed by the server cookie. The server +// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: +// +// cCookie := o.Cookie[:16] +// sCookie := o.Cookie[16:] +// +// There is no guarantee that the Cookie string has a specific length. +type EDNS0_COOKIE struct { + Code uint16 // Always EDNS0COOKIE + Cookie string // Hex-encoded cookie data +} + +func (e *EDNS0_COOKIE) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Cookie) + if err != nil { + return nil, err + } + return h, nil +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } +func (e *EDNS0_COOKIE) String() string { return e.Cookie } +func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} } + +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set +// an expiration on an update RR. This is helpful for clients that cannot clean +// up after themselves. This is a draft RFC and more information can be found at +// https://tools.ietf.org/html/draft-sekar-dns-ul-02 +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_UL) +// e.Code = dns.EDNS0UL +// e.Lease = 120 // in seconds +// o.Option = append(o.Option, e) +type EDNS0_UL struct { + Code uint16 // Always EDNS0UL + Lease uint32 + KeyLease uint32 +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } +func (e *EDNS0_UL) String() string { return fmt.Sprintf("%d %d", e.Lease, e.KeyLease) } +func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease, e.KeyLease} } + +// Copied: http://golang.org/src/pkg/net/dnsmsg.go +func (e *EDNS0_UL) pack() ([]byte, error) { + var b []byte + if e.KeyLease == 0 { + b = make([]byte, 4) + } else { + b = make([]byte, 8) + binary.BigEndian.PutUint32(b[4:], e.KeyLease) + } + binary.BigEndian.PutUint32(b, e.Lease) + return b, nil +} + +func (e *EDNS0_UL) unpack(b []byte) error { + switch len(b) { + case 4: + e.KeyLease = 0 + case 8: + e.KeyLease = binary.BigEndian.Uint32(b[4:]) + default: + return ErrBuf + } + e.Lease = binary.BigEndian.Uint32(b) + return nil +} + +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 +// Implemented for completeness, as the EDNS0 type code is assigned. +type EDNS0_LLQ struct { + Code uint16 // Always EDNS0LLQ + Version uint16 + Opcode uint16 + Error uint16 + Id uint64 + LeaseLife uint32 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } + +func (e *EDNS0_LLQ) pack() ([]byte, error) { + b := make([]byte, 18) + binary.BigEndian.PutUint16(b[0:], e.Version) + binary.BigEndian.PutUint16(b[2:], e.Opcode) + binary.BigEndian.PutUint16(b[4:], e.Error) + binary.BigEndian.PutUint64(b[6:], e.Id) + binary.BigEndian.PutUint32(b[14:], e.LeaseLife) + return b, nil +} + +func (e *EDNS0_LLQ) unpack(b []byte) error { + if len(b) < 18 { + return ErrBuf + } + e.Version = binary.BigEndian.Uint16(b[0:]) + e.Opcode = binary.BigEndian.Uint16(b[2:]) + e.Error = binary.BigEndian.Uint16(b[4:]) + e.Id = binary.BigEndian.Uint64(b[6:]) + e.LeaseLife = binary.BigEndian.Uint32(b[14:]) + return nil +} + +func (e *EDNS0_LLQ) String() string { + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) + + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) + return s +} +func (e *EDNS0_LLQ) copy() EDNS0 { + return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife} +} + +// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. +type EDNS0_DAU struct { + Code uint16 // Always EDNS0DAU + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DAU) String() string { + s := "" + for _, alg := range e.AlgCode { + if a, ok := AlgorithmToString[alg]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(alg)) + } + } + return s +} +func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} } + +// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. +type EDNS0_DHU struct { + Code uint16 // Always EDNS0DHU + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DHU) String() string { + s := "" + for _, alg := range e.AlgCode { + if a, ok := HashToString[alg]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(alg)) + } + } + return s +} +func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} } + +// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. +type EDNS0_N3U struct { + Code uint16 // Always EDNS0N3U + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_N3U) String() string { + // Re-use the hash map + s := "" + for _, alg := range e.AlgCode { + if a, ok := HashToString[alg]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(alg)) + } + } + return s +} +func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} } + +// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314. +type EDNS0_EXPIRE struct { + Code uint16 // Always EDNS0EXPIRE + Expire uint32 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } +func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} } + +func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, e.Expire) + return b, nil +} + +func (e *EDNS0_EXPIRE) unpack(b []byte) error { + if len(b) == 0 { + // zero-length EXPIRE query, see RFC 7314 Section 2 + return nil + } + if len(b) < 4 { + return ErrBuf + } + e.Expire = binary.BigEndian.Uint32(b) + return nil +} + +// The EDNS0_LOCAL option is used for local/experimental purposes. The option +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] +// (RFC6891), although any unassigned code can actually be used. The content of +// the option is made available in Data, unaltered. +// Basic use pattern for creating a local option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_LOCAL) +// e.Code = dns.EDNS0LOCALSTART +// e.Data = []byte{72, 82, 74} +// o.Option = append(o.Option, e) +type EDNS0_LOCAL struct { + Code uint16 + Data []byte +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } +func (e *EDNS0_LOCAL) String() string { + return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) +} +func (e *EDNS0_LOCAL) copy() EDNS0 { + b := make([]byte, len(e.Data)) + copy(b, e.Data) + return &EDNS0_LOCAL{e.Code, b} +} + +func (e *EDNS0_LOCAL) pack() ([]byte, error) { + b := make([]byte, len(e.Data)) + copied := copy(b, e.Data) + if copied != len(e.Data) { + return nil, ErrBuf + } + return b, nil +} + +func (e *EDNS0_LOCAL) unpack(b []byte) error { + e.Data = make([]byte, len(b)) + copied := copy(e.Data, b) + if copied != len(b) { + return ErrBuf + } + return nil +} + +// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep +// the TCP connection alive. See RFC 7828. 
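A hedged sketch of asking for a 2.5 second idle timeout with the option type defined below; o is assumed to be a *dns.OPT that is already attached to the message:

	e := new(dns.EDNS0_TCP_KEEPALIVE)
	e.Code = dns.EDNS0TCPKEEPALIVE
	e.Length = 2   // 2 when Timeout is present, 0 when it is omitted
	e.Timeout = 25 // units of 100 milliseconds, so 2.5 seconds
	o.Option = append(o.Option, e)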
+type EDNS0_TCP_KEEPALIVE struct { + Code uint16 // Always EDNSTCPKEEPALIVE + Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; + Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } + +func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { + if e.Timeout != 0 && e.Length != 2 { + return nil, errors.New("dns: timeout specified but length is not 2") + } + if e.Timeout == 0 && e.Length != 0 { + return nil, errors.New("dns: timeout not specified but length is not 0") + } + b := make([]byte, 4+e.Length) + binary.BigEndian.PutUint16(b[0:], e.Code) + binary.BigEndian.PutUint16(b[2:], e.Length) + if e.Length == 2 { + binary.BigEndian.PutUint16(b[4:], e.Timeout) + } + return b, nil +} + +func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Length = binary.BigEndian.Uint16(b[2:4]) + if e.Length != 0 && e.Length != 2 { + return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) + } + if e.Length == 2 { + if len(b) < 6 { + return ErrBuf + } + e.Timeout = binary.BigEndian.Uint16(b[4:6]) + } + return nil +} + +func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { + s = "use tcp keep-alive" + if e.Length == 0 { + s += ", timeout omitted" + } else { + s += fmt.Sprintf(", timeout %dms", e.Timeout*100) + } + return +} +func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} } + +// EDNS0_PADDING option is used to add padding to a request/response. The default +// value of padding SHOULD be 0x0 but other values MAY be used, for instance if +// compression is applied before encryption which may break signatures. +type EDNS0_PADDING struct { + Padding []byte +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } +func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } +func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } +func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } +func (e *EDNS0_PADDING) copy() EDNS0 { + b := make([]byte, len(e.Padding)) + copy(b, e.Padding) + return &EDNS0_PADDING{b} +} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go new file mode 100644 index 00000000..0ec79f2f --- /dev/null +++ b/vendor/github.com/miekg/dns/format.go @@ -0,0 +1,93 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. +func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. +// Accessing non existing fields will cause a panic. 
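A minimal sketch of walking the rdata fields of an RR with NumField and Field; error handling for NewRR is elided:

	rr, _ := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
	for i := 1; i <= dns.NumField(rr); i++ {
		fmt.Println(dns.Field(rr, i)) // prints "10", then "mx.miek.nl."
	}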
+func Field(r RR, i int) string { + if i == 0 { + return "" + } + d := reflect.ValueOf(r).Elem().Field(i) + switch d.Kind() { + case reflect.String: + return d.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(d.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(d.Uint(), 10) + case reflect.Slice: + switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { + case `dns:"a"`: + // TODO(miek): Hmm store this as 16 bytes + if d.Len() < net.IPv4len { + return "" + } + if d.Len() < net.IPv6len { + return net.IPv4(byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint())).String() + } + return net.IPv4(byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint())).String() + case `dns:"aaaa"`: + if d.Len() < net.IPv6len { + return "" + } + return net.IP{ + byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint()), + byte(d.Index(4).Uint()), + byte(d.Index(5).Uint()), + byte(d.Index(6).Uint()), + byte(d.Index(7).Uint()), + byte(d.Index(8).Uint()), + byte(d.Index(9).Uint()), + byte(d.Index(10).Uint()), + byte(d.Index(11).Uint()), + byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint()), + }.String() + case `dns:"nsec"`: + if d.Len() == 0 { + return "" + } + s := Type(d.Index(0).Uint()).String() + for i := 1; i < d.Len(); i++ { + s += " " + Type(d.Index(i).Uint()).String() + } + return s + default: + // if it does not have a tag its a string slice + fallthrough + case `dns:"txt"`: + if d.Len() == 0 { + return "" + } + s := d.Index(0).String() + for i := 1; i < d.Len(); i++ { + s += " " + d.Index(i).String() + } + return s + } + } + return "" +} diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go new file mode 100644 index 00000000..57410acd --- /dev/null +++ b/vendor/github.com/miekg/dns/fuzz.go @@ -0,0 +1,32 @@ +// +build fuzz + +package dns + +import "strings" + +func Fuzz(data []byte) int { + msg := new(Msg) + + if err := msg.Unpack(data); err != nil { + return 0 + } + if _, err := msg.Pack(); err != nil { + return 0 + } + + return 1 +} + +func FuzzNewRR(data []byte) int { + str := string(data) + // Do not fuzz lines that include the $INCLUDE keyword and hint the fuzzer + // at avoiding them. + // See GH#1025 for context. + if strings.Contains(strings.ToUpper(str), "$INCLUDE") { + return -1 + } + if _, err := NewRR(str); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go new file mode 100644 index 00000000..f7e91a23 --- /dev/null +++ b/vendor/github.com/miekg/dns/generate.go @@ -0,0 +1,247 @@ +package dns + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// Parse the $GENERATE statement as used in BIND9 zones. +// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. +// We are called after '$GENERATE '. After which we expect: +// * the range (12-24/2) +// * lhs (ownername) +// * [[ttl][class]] +// * type +// * rhs (rdata) +// But we are lazy here, only the range is parsed *all* occurrences +// of $ after that are interpreted. 
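A hedged sketch of how a $GENERATE directive is expanded through the public ZoneParser API; the zone text is illustrative and real code should also check zp.Err() after the loop:

	zone := "$ORIGIN example.org.\n$GENERATE 1-3 host$ IN A 10.0.0.$\n"
	zp := dns.NewZoneParser(strings.NewReader(zone), "", "")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr) // host1.example.org. ... IN A 10.0.0.1, and so on
	}
	if err := zp.Err(); err != nil {
		// handle parse error
	}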
+func (zp *ZoneParser) generate(l lex) (RR, bool) { + token := l.token + step := 1 + if i := strings.IndexByte(token, '/'); i >= 0 { + if i+1 == len(token) { + return zp.setParseError("bad step in $GENERATE range", l) + } + + s, err := strconv.Atoi(token[i+1:]) + if err != nil || s <= 0 { + return zp.setParseError("bad step in $GENERATE range", l) + } + + step = s + token = token[:i] + } + + sx := strings.SplitN(token, "-", 2) + if len(sx) != 2 { + return zp.setParseError("bad start-stop in $GENERATE range", l) + } + + start, err := strconv.Atoi(sx[0]) + if err != nil { + return zp.setParseError("bad start in $GENERATE range", l) + } + + end, err := strconv.Atoi(sx[1]) + if err != nil { + return zp.setParseError("bad stop in $GENERATE range", l) + } + if end < 0 || start < 0 || end < start || (end-start)/step > 65535 { + return zp.setParseError("bad range in $GENERATE range", l) + } + + // _BLANK + l, ok := zp.c.Next() + if !ok || l.value != zBlank { + return zp.setParseError("garbage after $GENERATE range", l) + } + + // Create a complete new string, which we then parse again. + var s string + for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() { + if l.err { + return zp.setParseError("bad data in $GENERATE directive", l) + } + if l.value == zNewline { + break + } + + s += l.token + } + + r := &generateReader{ + s: s, + + cur: start, + start: start, + end: end, + step: step, + + file: zp.file, + lex: &l, + } + zp.sub = NewZoneParser(r, zp.origin, zp.file) + zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed + zp.sub.generateDisallowed = true + zp.sub.SetDefaultTTL(defaultTtl) + return zp.subNext() +} + +type generateReader struct { + s string + si int + + cur int + start int + end int + step int + + mod bytes.Buffer + + escape bool + + eof bool + + file string + lex *lex +} + +func (r *generateReader) parseError(msg string, end int) *ParseError { + r.eof = true // Make errors sticky. + + l := *r.lex + l.token = r.s[r.si-1 : end] + l.column += r.si // l.column starts one zBLANK before r.s + + return &ParseError{r.file, msg, l} +} + +func (r *generateReader) Read(p []byte) (int, error) { + // NewZLexer, through NewZoneParser, should use ReadByte and + // not end up here. 
+ + panic("not implemented") +} + +func (r *generateReader) ReadByte() (byte, error) { + if r.eof { + return 0, io.EOF + } + if r.mod.Len() > 0 { + return r.mod.ReadByte() + } + + if r.si >= len(r.s) { + r.si = 0 + r.cur += r.step + + r.eof = r.cur > r.end || r.cur < 0 + return '\n', nil + } + + si := r.si + r.si++ + + switch r.s[si] { + case '\\': + if r.escape { + r.escape = false + return '\\', nil + } + + r.escape = true + return r.ReadByte() + case '$': + if r.escape { + r.escape = false + return '$', nil + } + + mod := "%d" + + if si >= len(r.s)-1 { + // End of the string + fmt.Fprintf(&r.mod, mod, r.cur) + return r.mod.ReadByte() + } + + if r.s[si+1] == '$' { + r.si++ + return '$', nil + } + + var offset int + + // Search for { and } + if r.s[si+1] == '{' { + // Modifier block + sep := strings.Index(r.s[si+2:], "}") + if sep < 0 { + return 0, r.parseError("bad modifier in $GENERATE", len(r.s)) + } + + var errMsg string + mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep]) + if errMsg != "" { + return 0, r.parseError(errMsg, si+3+sep) + } + if r.start+offset < 0 || r.end+offset > 1<<31-1 { + return 0, r.parseError("bad offset in $GENERATE", si+3+sep) + } + + r.si += 2 + sep // Jump to it + } + + fmt.Fprintf(&r.mod, mod, r.cur+offset) + return r.mod.ReadByte() + default: + if r.escape { // Pretty useless here + r.escape = false + return r.ReadByte() + } + + return r.s[si], nil + } +} + +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. +func modToPrintf(s string) (string, int, string) { + // Modifier is { offset [ ,width [ ,base ] ] } - provide default + // values for optional width and type, if necessary. + var offStr, widthStr, base string + switch xs := strings.Split(s, ","); len(xs) { + case 1: + offStr, widthStr, base = xs[0], "0", "d" + case 2: + offStr, widthStr, base = xs[0], xs[1], "d" + case 3: + offStr, widthStr, base = xs[0], xs[1], xs[2] + default: + return "", 0, "bad modifier in $GENERATE" + } + + switch base { + case "o", "d", "x", "X": + default: + return "", 0, "bad base in $GENERATE" + } + + offset, err := strconv.Atoi(offStr) + if err != nil { + return "", 0, "bad offset in $GENERATE" + } + + width, err := strconv.Atoi(widthStr) + if err != nil || width < 0 || width > 255 { + return "", 0, "bad width in $GENERATE" + } + + if width == 0 { + return "%" + base, offset, "" + } + + return "%0" + widthStr + base, offset, "" +} diff --git a/vendor/github.com/miekg/dns/go.mod b/vendor/github.com/miekg/dns/go.mod new file mode 100644 index 00000000..6003d057 --- /dev/null +++ b/vendor/github.com/miekg/dns/go.mod @@ -0,0 +1,11 @@ +module github.com/miekg/dns + +go 1.12 + +require ( + golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 + golang.org/x/net v0.0.0-20190923162816-aa69164e4478 + golang.org/x/sync v0.0.0-20190423024810-112230192c58 + golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe + golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 // indirect +) diff --git a/vendor/github.com/miekg/dns/go.sum b/vendor/github.com/miekg/dns/go.sum new file mode 100644 index 00000000..96bda3a9 --- /dev/null +++ b/vendor/github.com/miekg/dns/go.sum @@ -0,0 +1,39 @@ +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4 h1:Vk3wNqEZwyGyei9yq5ekj7frek2u7HUfffJ1/opblzc= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 
h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= +golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611 h1:O33LKL7WyJgjN9CvxfTIomjIClbd/Kq86/iipowHQU0= +golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go new file mode 100644 index 00000000..10d82471 --- /dev/null +++ b/vendor/github.com/miekg/dns/labels.go @@ -0,0 +1,212 @@ +package dns + +// Holds a bunch of helper functions for dealing with labels. + +// SplitDomainName splits a name string into it's labels. +// www.miek.nl. returns []string{"www", "miek", "nl"} +// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, +// The root label (.) returns nil. Note that using +// strings.Split(s) will work in most cases, but does not handle +// escaped dots (\.) for instance. +// s must be a syntactically valid domain name, see IsDomainName. +func SplitDomainName(s string) (labels []string) { + if len(s) == 0 { + return nil + } + fqdnEnd := 0 // offset of the final '.' or the length of the name + idx := Split(s) + begin := 0 + if IsFqdn(s) { + fqdnEnd = len(s) - 1 + } else { + fqdnEnd = len(s) + } + + switch len(idx) { + case 0: + return nil + case 1: + // no-op + default: + for _, end := range idx[1:] { + labels = append(labels, s[begin:end-1]) + begin = end + } + } + + return append(labels, s[begin:fqdnEnd]) +} + +// CompareDomainName compares the names s1 and s2 and +// returns how many labels they have in common starting from the *right*. +// The comparison stops at the first inequality. The names are downcased +// before the comparison. +// +// www.miek.nl. and miek.nl. have two labels in common: miek and nl +// www.miek.nl. and www.bla.nl. have one label in common: nl +// +// s1 and s2 must be syntactically valid domain names. +func CompareDomainName(s1, s2 string) (n int) { + // the first check: root label + if s1 == "." || s2 == "." { + return 0 + } + + l1 := Split(s1) + l2 := Split(s2) + + j1 := len(l1) - 1 // end + i1 := len(l1) - 2 // start + j2 := len(l2) - 1 + i2 := len(l2) - 2 + // the second check can be done here: last/only label + // before we fall through into the for-loop below + if equal(s1[l1[j1]:], s2[l2[j2]:]) { + n++ + } else { + return + } + for { + if i1 < 0 || i2 < 0 { + break + } + if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) { + n++ + } else { + break + } + j1-- + i1-- + j2-- + i2-- + } + return +} + +// CountLabel counts the the number of labels in the string s. +// s must be a syntactically valid domain name. +func CountLabel(s string) (labels int) { + if s == "." { + return + } + off := 0 + end := false + for { + off, end = NextLabel(s, off) + labels++ + if end { + return + } + } +} + +// Split splits a name s into its label indexes. +// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. +// The root name (.) returns nil. Also see SplitDomainName. +// s must be a syntactically valid domain name. +func Split(s string) []int { + if s == "." { + return nil + } + idx := make([]int, 1, 3) + off := 0 + end := false + + for { + off, end = NextLabel(s, off) + if end { + return idx + } + idx = append(idx, off) + } +} + +// NextLabel returns the index of the start of the next label in the +// string s starting at offset. +// The bool end is true when the end of the string has been reached. +// Also see PrevLabel. +func NextLabel(s string, offset int) (i int, end bool) { + if s == "" { + return 0, true + } + for i = offset; i < len(s)-1; i++ { + if s[i] != '.' 
{ + continue + } + j := i - 1 + for j >= 0 && s[j] == '\\' { + j-- + } + + if (j-i)%2 == 0 { + continue + } + + return i + 1, false + } + return i + 1, true +} + +// PrevLabel returns the index of the label when starting from the right and +// jumping n labels to the left. +// The bool start is true when the start of the string has been overshot. +// Also see NextLabel. +func PrevLabel(s string, n int) (i int, start bool) { + if s == "" { + return 0, true + } + if n == 0 { + return len(s), false + } + + l := len(s) - 1 + if s[l] == '.' { + l-- + } + + for ; l >= 0 && n > 0; l-- { + if s[l] != '.' { + continue + } + j := l - 1 + for j >= 0 && s[j] == '\\' { + j-- + } + + if (j-l)%2 == 0 { + continue + } + + n-- + if n == 0 { + return l + 1, false + } + } + + return 0, n > 1 +} + +// equal compares a and b while ignoring case. It returns true when equal otherwise false. +func equal(a, b string) bool { + // might be lifted into API function. + la := len(a) + lb := len(b) + if la != lb { + return false + } + + for i := la - 1; i >= 0; i-- { + ai := a[i] + bi := b[i] + if ai >= 'A' && ai <= 'Z' { + ai |= 'a' - 'A' + } + if bi >= 'A' && bi <= 'Z' { + bi |= 'a' - 'A' + } + if ai != bi { + return false + } + } + return true +} diff --git a/vendor/github.com/miekg/dns/listen_go111.go b/vendor/github.com/miekg/dns/listen_go111.go new file mode 100644 index 00000000..fad195cf --- /dev/null +++ b/vendor/github.com/miekg/dns/listen_go111.go @@ -0,0 +1,44 @@ +// +build go1.11 +// +build aix darwin dragonfly freebsd linux netbsd openbsd + +package dns + +import ( + "context" + "net" + "syscall" + + "golang.org/x/sys/unix" +) + +const supportsReusePort = true + +func reuseportControl(network, address string, c syscall.RawConn) error { + var opErr error + err := c.Control(func(fd uintptr) { + opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) + }) + if err != nil { + return err + } + + return opErr +} + +func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { + var lc net.ListenConfig + if reuseport { + lc.Control = reuseportControl + } + + return lc.Listen(context.Background(), network, addr) +} + +func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { + var lc net.ListenConfig + if reuseport { + lc.Control = reuseportControl + } + + return lc.ListenPacket(context.Background(), network, addr) +} diff --git a/vendor/github.com/miekg/dns/listen_go_not111.go b/vendor/github.com/miekg/dns/listen_go_not111.go new file mode 100644 index 00000000..b9201417 --- /dev/null +++ b/vendor/github.com/miekg/dns/listen_go_not111.go @@ -0,0 +1,23 @@ +// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd + +package dns + +import "net" + +const supportsReusePort = false + +func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { + if reuseport { + // TODO(tmthrgd): return an error? + } + + return net.Listen(network, addr) +} + +func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { + if reuseport { + // TODO(tmthrgd): return an error? + } + + return net.ListenPacket(network, addr) +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go new file mode 100644 index 00000000..29381300 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg.go @@ -0,0 +1,1196 @@ +// DNS packet assembly, see RFC 1035. Converting from - Unpack() - +// and to - Pack() - wire format. +// All the packers and unpackers take a (msg []byte, off int) +// and return (off1 int, ok bool). 
If they return ok==false, they +// also return off1==len(msg), so that the next unpacker will +// also fail. This lets us avoid checks of ok until the end of a +// packing sequence. + +package dns + +//go:generate go run msg_generate.go + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + "math/big" + "strconv" + "strings" +) + +const ( + maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer + maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 + + // This is the maximum number of compression pointers that should occur in a + // semantically valid message. Each label in a domain name must be at least one + // octet and is separated by a period. The root label won't be represented by a + // compression pointer to a compression pointer, hence the -2 to exclude the + // smallest valid root label. + // + // It is possible to construct a valid message that has more compression pointers + // than this, and still doesn't loop, by pointing to a previous pointer. This is + // not something a well written implementation should ever do, so we leave them + // to trip the maximum compression pointer check. + maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2 + + // This is the maximum length of a domain name in presentation format. The + // maximum wire length of a domain name is 255 octets (see above), with the + // maximum label length being 63. The wire format requires one extra byte over + // the presentation format, reducing the number of octets by 1. Each label in + // the name will be separated by a single period, with each octet in the label + // expanding to at most 4 bytes (\DDD). If all other labels are of the maximum + // length, then the final label can only be 61 octets long to not exceed the + // maximum allowed wire length. + maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1 +) + +// Errors defined in this package. +var ( + ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. + ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. + ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... + ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. + ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. + ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. + ErrKey error = &Error{err: "bad key"} + ErrKeySize error = &Error{err: "bad key size"} + ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} + ErrNoSig error = &Error{err: "no signature found"} + ErrPrivKey error = &Error{err: "bad private key"} + ErrRcode error = &Error{err: "bad rcode"} + ErrRdata error = &Error{err: "bad rdata"} + ErrRRset error = &Error{err: "bad rrset"} + ErrSecret error = &Error{err: "no secrets defined"} + ErrShortRead error = &Error{err: "short read"} + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. 
+ ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. +) + +// Id by default returns a 16-bit random number to be used as a message id. The +// number is drawn from a cryptographically secure random number generator. +// This being a variable the function can be reassigned to a custom function. +// For instance, to make it return a static value for testing: +// +// dns.Id = func() uint16 { return 3 } +var Id = id + +// id returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. +func id() uint16 { + var output uint16 + err := binary.Read(rand.Reader, binary.BigEndian, &output) + if err != nil { + panic("dns: reading random id failed: " + err.Error()) + } + return output +} + +// MsgHdr is a a manually-unpacked version of (id, bits). +type MsgHdr struct { + Id uint16 + Response bool + Opcode int + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + Zero bool + AuthenticatedData bool + CheckingDisabled bool + Rcode int +} + +// Msg contains the layout of a DNS message. +type Msg struct { + MsgHdr + Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. + Question []Question // Holds the RR(s) of the question section. + Answer []RR // Holds the RR(s) of the answer section. + Ns []RR // Holds the RR(s) of the authority section. + Extra []RR // Holds the RR(s) of the additional section. +} + +// ClassToString is a maps Classes to strings for each CLASS wire type. +var ClassToString = map[uint16]string{ + ClassINET: "IN", + ClassCSNET: "CS", + ClassCHAOS: "CH", + ClassHESIOD: "HS", + ClassNONE: "NONE", + ClassANY: "ANY", +} + +// OpcodeToString maps Opcodes to strings. +var OpcodeToString = map[int]string{ + OpcodeQuery: "QUERY", + OpcodeIQuery: "IQUERY", + OpcodeStatus: "STATUS", + OpcodeNotify: "NOTIFY", + OpcodeUpdate: "UPDATE", +} + +// RcodeToString maps Rcodes to strings. +var RcodeToString = map[int]string{ + RcodeSuccess: "NOERROR", + RcodeFormatError: "FORMERR", + RcodeServerFailure: "SERVFAIL", + RcodeNameError: "NXDOMAIN", + RcodeNotImplemented: "NOTIMP", + RcodeRefused: "REFUSED", + RcodeYXDomain: "YXDOMAIN", // See RFC 2136 + RcodeYXRrset: "YXRRSET", + RcodeNXRrset: "NXRRSET", + RcodeNotAuth: "NOTAUTH", + RcodeNotZone: "NOTZONE", + RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 + // RcodeBadVers: "BADVERS", + RcodeBadKey: "BADKEY", + RcodeBadTime: "BADTIME", + RcodeBadMode: "BADMODE", + RcodeBadName: "BADNAME", + RcodeBadAlg: "BADALG", + RcodeBadTrunc: "BADTRUNC", + RcodeBadCookie: "BADCOOKIE", +} + +// compressionMap is used to allow a more efficient compression map +// to be used for internal packDomainName calls without changing the +// signature or functionality of public API. +// +// In particular, map[string]uint16 uses 25% less per-entry memory +// than does map[string]int. 
+type compressionMap struct { + ext map[string]int // external callers + int map[string]uint16 // internal callers +} + +func (m compressionMap) valid() bool { + return m.int != nil || m.ext != nil +} + +func (m compressionMap) insert(s string, pos int) { + if m.ext != nil { + m.ext[s] = pos + } else { + m.int[s] = uint16(pos) + } +} + +func (m compressionMap) find(s string) (int, bool) { + if m.ext != nil { + pos, ok := m.ext[s] + return pos, ok + } + + pos, ok := m.int[s] + return int(pos), ok +} + +// Domain names are a sequence of counted strings +// split at the dots. They end with a zero-length string. + +// PackDomainName packs a domain name s into msg[off:]. +// If compression is wanted compress must be true and the compression +// map needs to hold a mapping between domain names and offsets +// pointing into msg. +func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + return packDomainName(s, msg, off, compressionMap{ext: compression}, compress) +} + +func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + // XXX: A logical copy of this function exists in IsDomainName and + // should be kept in sync with this function. + + ls := len(s) + if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. + return off, nil + } + + // If not fully qualified, error out. + if !IsFqdn(s) { + return len(msg), ErrFqdn + } + + // Each dot ends a segment of the name. + // We trade each dot byte for a length byte. + // Except for escaped dots (\.), which are normal dots. + // There is also a trailing zero. + + // Compression + pointer := -1 + + // Emit sequence of counted strings, chopping at dots. + var ( + begin int + compBegin int + compOff int + bs []byte + wasDot bool + ) +loop: + for i := 0; i < ls; i++ { + var c byte + if bs == nil { + c = s[i] + } else { + c = bs[i] + } + + switch c { + case '\\': + if off+1 > len(msg) { + return len(msg), ErrBuf + } + + if bs == nil { + bs = []byte(s) + } + + // check for \DDD + if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) { + bs[i] = dddToByte(bs[i+1:]) + copy(bs[i+1:ls-3], bs[i+4:]) + ls -= 3 + compOff += 3 + } else { + copy(bs[i:ls-1], bs[i+1:]) + ls-- + compOff++ + } + + wasDot = false + case '.': + if wasDot { + // two dots back to back is not legal + return len(msg), ErrRdata + } + wasDot = true + + labelLen := i - begin + if labelLen >= 1<<6 { // top two bits of length must be clear + return len(msg), ErrRdata + } + + // off can already (we're in a loop) be bigger than len(msg) + // this happens when a name isn't fully qualified + if off+1+labelLen > len(msg) { + return len(msg), ErrBuf + } + + // Don't try to compress '.' + // We should only compress when compress is true, but we should also still pick + // up names that can be used for *future* compression(s). + if compression.valid() && !isRootLabel(s, bs, begin, ls) { + if p, ok := compression.find(s[compBegin:]); ok { + // The first hit is the longest matching dname + // keep the pointer offset we get back and store + // the offset of the current name, because that's + // where we need to insert the pointer later + + // If compress is true, we're allowed to compress this dname + if compress { + pointer = p // Where to point to + break loop + } + } else if off < maxCompressionOffset { + // Only offsets smaller than maxCompressionOffset can be used. 
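For orientation while reading packDomainName: a minimal standalone sketch of the counted-string layout it emits and of the two-byte pointer it can substitute for a repeated suffix. packSimpleName is a made-up helper for this illustration only; it skips escape handling and the compression map entirely.

package main

import (
	"encoding/binary"
	"fmt"
	"strings"
)

// packSimpleName sketches the counted-string layout used by packDomainName:
// each label becomes a length byte followed by the label bytes, and the name
// ends with a zero byte. Escaped dots and compression are ignored here.
func packSimpleName(name string) []byte {
	var out []byte
	for _, label := range strings.Split(strings.TrimSuffix(name, "."), ".") {
		out = append(out, byte(len(label)))
		out = append(out, label...)
	}
	return append(out, 0)
}

func main() {
	fmt.Printf("%% x\n", packSimpleName("www.example.com."))
	// 03 77 77 77 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00

	// A compression pointer is two bytes with the top two bits set: if
	// "example.com." already starts at offset 12 of the message, a later
	// occurrence can be replaced by 0xC000|12.
	ptr := make([]byte, 2)
	binary.BigEndian.PutUint16(ptr, 0xC000|12)
	fmt.Printf("%% x\n", ptr) // c0 0c
}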
+ compression.insert(s[compBegin:], off) + } + } + + // The following is covered by the length check above. + msg[off] = byte(labelLen) + + if bs == nil { + copy(msg[off+1:], s[begin:i]) + } else { + copy(msg[off+1:], bs[begin:i]) + } + off += 1 + labelLen + + begin = i + 1 + compBegin = begin + compOff + default: + wasDot = false + } + } + + // Root label is special + if isRootLabel(s, bs, 0, ls) { + return off, nil + } + + // If we did compression and we find something add the pointer here + if pointer != -1 { + // We have two bytes (14 bits) to put the pointer in + binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000)) + return off + 2, nil + } + + if off < len(msg) { + msg[off] = 0 + } + + return off + 1, nil +} + +// isRootLabel returns whether s or bs, from off to end, is the root +// label ".". +// +// If bs is nil, s will be checked, otherwise bs will be checked. +func isRootLabel(s string, bs []byte, off, end int) bool { + if bs == nil { + return s[off:end] == "." + } + + return end-off == 1 && bs[off] == '.' +} + +// Unpack a domain name. +// In addition to the simple sequences of counted strings above, +// domain names are allowed to refer to strings elsewhere in the +// packet, to avoid repeating common suffixes when returning +// many entries in a single domain. The pointers are marked +// by a length byte with the top two bits set. Ignoring those +// two bits, that byte and the next give a 14 bit offset from msg[0] +// where we should pick up the trail. +// Note that if we jump elsewhere in the packet, +// we return off1 == the offset after the first pointer we found, +// which is where the next record will start. +// In theory, the pointers are only allowed to jump backward. +// We let them jump anywhere and stop jumping after a while. + +// UnpackDomainName unpacks a domain name into a string. It returns +// the name, the new offset into msg and any error that occurred. +// +// When an error is encountered, the unpacked name will be discarded +// and len(msg) will be returned as the offset. +func UnpackDomainName(msg []byte, off int) (string, int, error) { + s := make([]byte, 0, maxDomainNamePresentationLength) + off1 := 0 + lenmsg := len(msg) + budget := maxDomainNameWireOctets + ptr := 0 // number of pointers followed +Loop: + for { + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // end of name + break Loop + } + // literal string + if off+c > lenmsg { + return "", lenmsg, ErrBuf + } + budget -= c + 1 // +1 for the label separator + if budget <= 0 { + return "", lenmsg, ErrLongDomain + } + for _, b := range msg[off : off+c] { + switch b { + case '.', '(', ')', ';', ' ', '@': + fallthrough + case '"', '\\': + s = append(s, '\\', b) + default: + if b < ' ' || b > '~' { // unprintable, use \DDD + s = append(s, escapeByte(b)...) + } else { + s = append(s, b) + } + } + } + s = append(s, '.') + off += c + case 0xC0: + // pointer to somewhere else in msg. + // remember location after first ptr, + // since that's how many bytes we consumed. + // also, don't follow too many pointers -- + // maybe there's a loop. 
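A small usage sketch of the decoding side, assuming the vendored github.com/miekg/dns import path: the message below is hand-built so that the name at offset 13 ends in a compression pointer (0xC0 0x00) back to offset 0, and UnpackDomainName follows it.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// "example.com." at offset 0, then "www" plus a two-byte compression
	// pointer (0xC000 | 0) back to offset 0.
	msg := []byte{
		7, 'e', 'x', 'a', 'm', 'p', 'l', 'e', 3, 'c', 'o', 'm', 0, // offsets 0..12
		3, 'w', 'w', 'w', 0xC0, 0x00, // offsets 13..18
	}

	name, next, err := dns.UnpackDomainName(msg, 13)
	fmt.Println(name, next, err) // www.example.com. 19 <nil>
}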
+ if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c1 := msg[off] + off++ + if ptr == 0 { + off1 = off + } + if ptr++; ptr > maxCompressionPointers { + return "", lenmsg, &Error{err: "too many compression pointers"} + } + // pointer should guarantee that it advances and points forwards at least + // but the condition on previous three lines guarantees that it's + // at least loop-free + off = (c^0xC0)<<8 | int(c1) + default: + // 0x80 and 0x40 are reserved + return "", lenmsg, ErrRdata + } + } + if ptr == 0 { + off1 = off + } + if len(s) == 0 { + return ".", off1, nil + } + return string(s), off1, nil +} + +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { + if len(txt) == 0 { + if offset >= len(msg) { + return offset, ErrBuf + } + msg[offset] = 0 + return offset, nil + } + var err error + for _, s := range txt { + if len(s) > len(tmp) { + return offset, ErrBuf + } + offset, err = packTxtString(s, msg, offset, tmp) + if err != nil { + return offset, err + } + } + return offset, nil +} + +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { + lenByteOffset := offset + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + offset++ + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + l := offset - lenByteOffset - 1 + if l > 255 { + return offset, &Error{err: "string exceeded 255 bytes in txt"} + } + msg[lenByteOffset] = byte(l) + return offset, nil +} + +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + return offset, nil +} + +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { + off = off0 + var s string + for off < len(msg) && err == nil { + s, off, err = unpackString(msg, off) + if err == nil { + ss = append(ss, s) + } + } + return +} + +// Helpers for dealing with escaped bytes +func isDigit(b byte) bool { return b >= '0' && b <= '9' } + +func dddToByte(s []byte) byte { + _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +func dddStringToByte(s string) byte { + _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +// Helper function for packing and unpacking +func intToBytes(i *big.Int, length int) []byte { + buf := i.Bytes() + if len(buf) < length { + b := make([]byte, length) + copy(b[length-len(buf):], buf) + return b + } + return buf +} + +// PackRR packs a resource record rr into msg[off:]. +// See PackDomainName for documentation about the compression. 
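A usage sketch of PackRR, assuming the vendored import path and the package's A record type: a single record is packed into a caller-supplied buffer with no compression map, and PackRR back-fills rr.Header().Rdlength for the caller.

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	rr := &dns.A{
		Hdr: dns.RR_Header{Name: "example.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 300},
		A:   net.IPv4(192, 0, 2, 1),
	}

	buf := make([]byte, 64)
	off, err := dns.PackRR(rr, buf, 0, nil, false)

	// 13 bytes of owner name, 10 bytes of fixed header fields, 4 bytes of rdata.
	fmt.Println(off, err, rr.Header().Rdlength) // 27 <nil> 4
}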
+func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress) + if err == nil { + // packRR no longer sets the Rdlength field on the rr, but + // callers might be expecting it so we set it here. + rr.Header().Rdlength = uint16(off1 - headerEnd) + } + return off1, err +} + +func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) { + if rr == nil { + return len(msg), len(msg), &Error{err: "nil rr"} + } + + headerEnd, err = rr.Header().packHeader(msg, off, compression, compress) + if err != nil { + return headerEnd, len(msg), err + } + + off1, err = rr.pack(msg, headerEnd, compression, compress) + if err != nil { + return headerEnd, len(msg), err + } + + rdlength := off1 - headerEnd + if int(uint16(rdlength)) != rdlength { // overflow + return headerEnd, len(msg), ErrRdata + } + + // The RDLENGTH field is the last field in the header and we set it here. + binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength)) + return headerEnd, off1, nil +} + +// UnpackRR unpacks msg[off:] into an RR. +func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { + h, off, msg, err := unpackHeader(msg, off) + if err != nil { + return nil, len(msg), err + } + + return UnpackRRWithHeader(h, msg, off) +} + +// UnpackRRWithHeader unpacks the record type specific payload given an existing +// RR_Header. +func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) { + if newFn, ok := TypeToRR[h.Rrtype]; ok { + rr = newFn() + *rr.Header() = h + } else { + rr = &RFC3597{Hdr: h} + } + + if noRdata(h) { + return rr, off, nil + } + + end := off + int(h.Rdlength) + + off, err = rr.unpack(msg, off) + if err != nil { + return nil, end, err + } + if off != end { + return &h, end, &Error{err: "bad rdlength"} + } + + return rr, off, nil +} + +// unpackRRslice unpacks msg[off:] into an []RR. +// If we cannot unpack the whole array, then it will return nil +func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { + var r RR + // Don't pre-allocate, l may be under attacker control + var dst []RR + for i := 0; i < l; i++ { + off1 := off + r, off, err = UnpackRR(msg, off) + if err != nil { + off = len(msg) + break + } + // If offset does not increase anymore, l is a lie + if off1 == off { + l = i + break + } + dst = append(dst, r) + } + if err != nil && off == len(msg) { + dst = nil + } + return dst, off, err +} + +// Convert a MsgHdr to a string, with dig-like headers: +// +//;; opcode: QUERY, status: NOERROR, id: 48404 +// +//;; flags: qr aa rd ra; +func (h *MsgHdr) String() string { + if h == nil { + return " MsgHdr" + } + + s := ";; opcode: " + OpcodeToString[h.Opcode] + s += ", status: " + RcodeToString[h.Rcode] + s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" + + s += ";; flags:" + if h.Response { + s += " qr" + } + if h.Authoritative { + s += " aa" + } + if h.Truncated { + s += " tc" + } + if h.RecursionDesired { + s += " rd" + } + if h.RecursionAvailable { + s += " ra" + } + if h.Zero { // Hmm + s += " z" + } + if h.AuthenticatedData { + s += " ad" + } + if h.CheckingDisabled { + s += " cd" + } + + s += ";" + return s +} + +// Pack packs a Msg: it is converted to to wire format. +// If the dns.Compress is true the message will be in compressed wire format. 
+func (dns *Msg) Pack() (msg []byte, err error) { + return dns.PackBuffer(nil) +} + +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated. +func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { + // If this message can't be compressed, avoid filling the + // compression map and creating garbage. + if dns.Compress && dns.isCompressible() { + compression := make(map[string]uint16) // Compression pointer mappings. + return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true) + } + + return dns.packBufferWithCompressionMap(buf, compressionMap{}, false) +} + +// packBufferWithCompressionMap packs a Msg, using the given buffer buf. +func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) { + if dns.Rcode < 0 || dns.Rcode > 0xFFF { + return nil, ErrRcode + } + + // Set extended rcode unconditionally if we have an opt, this will allow + // reseting the extended rcode bits if they need to. + if opt := dns.IsEdns0(); opt != nil { + opt.SetExtendedRcode(uint16(dns.Rcode)) + } else if dns.Rcode > 0xF { + // If Rcode is an extended one and opt is nil, error out. + return nil, ErrExtendedRcode + } + + // Convert convenient Msg into wire-like Header. + var dh Header + dh.Id = dns.Id + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF) + if dns.Response { + dh.Bits |= _QR + } + if dns.Authoritative { + dh.Bits |= _AA + } + if dns.Truncated { + dh.Bits |= _TC + } + if dns.RecursionDesired { + dh.Bits |= _RD + } + if dns.RecursionAvailable { + dh.Bits |= _RA + } + if dns.Zero { + dh.Bits |= _Z + } + if dns.AuthenticatedData { + dh.Bits |= _AD + } + if dns.CheckingDisabled { + dh.Bits |= _CD + } + + dh.Qdcount = uint16(len(dns.Question)) + dh.Ancount = uint16(len(dns.Answer)) + dh.Nscount = uint16(len(dns.Ns)) + dh.Arcount = uint16(len(dns.Extra)) + + // We need the uncompressed length here, because we first pack it and then compress it. + msg = buf + uncompressedLen := msgLenWithCompressionMap(dns, nil) + if packLen := uncompressedLen + 1; len(msg) < packLen { + msg = make([]byte, packLen) + } + + // Pack it in: header and then the pieces. + off := 0 + off, err = dh.pack(msg, off, compression, compress) + if err != nil { + return nil, err + } + for _, r := range dns.Question { + off, err = r.pack(msg, off, compression, compress) + if err != nil { + return nil, err + } + } + for _, r := range dns.Answer { + _, off, err = packRR(r, msg, off, compression, compress) + if err != nil { + return nil, err + } + } + for _, r := range dns.Ns { + _, off, err = packRR(r, msg, off, compression, compress) + if err != nil { + return nil, err + } + } + for _, r := range dns.Extra { + _, off, err = packRR(r, msg, off, compression, compress) + if err != nil { + return nil, err + } + } + return msg[:off], nil +} + +func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { + // If we are at the end of the message we should return *just* the + // header. This can still be useful to the caller. 9.9.9.9 sends these + // when responding with REFUSED for instance. + if off == len(msg) { + // reset sections before returning + dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil + return nil + } + + // Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are + // attacker controlled. This means we can't use them to pre-allocate + // slices. 
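A sketch of the point made above about untrusted counts, assuming the vendored import path: a bare 12-byte header that claims 65535 records in every section unpacks cleanly, and none of the counts are used to allocate anything.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Only a header, nothing after it. Every count field claims 0xffff.
	raw := []byte{
		0x12, 0x34, // Id
		0x00, 0x00, // Bits
		0xff, 0xff, // Qdcount
		0xff, 0xff, // Ancount
		0xff, 0xff, // Nscount
		0xff, 0xff, // Arcount
	}

	var m dns.Msg
	err := m.Unpack(raw)
	fmt.Println(err, m.Id, len(m.Question), len(m.Answer)) // <nil> 4660 0 0
}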
+ dns.Question = nil + for i := 0; i < int(dh.Qdcount); i++ { + off1 := off + var q Question + q, off, err = unpackQuestion(msg, off) + if err != nil { + return err + } + if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! + dh.Qdcount = uint16(i) + break + } + dns.Question = append(dns.Question, q) + } + + dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) + // The header counts might have been wrong so we need to update it + dh.Ancount = uint16(len(dns.Answer)) + if err == nil { + dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Nscount = uint16(len(dns.Ns)) + if err == nil { + dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Arcount = uint16(len(dns.Extra)) + + // Set extended Rcode + if opt := dns.IsEdns0(); opt != nil { + dns.Rcode |= opt.ExtendedRcode() + } + + if off != len(msg) { + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // println("dns: extra bytes in dns packet", off, "<", len(msg)) + } + return err + +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + dh, off, err := unpackMsgHdr(msg, 0) + if err != nil { + return err + } + + dns.setHdr(dh) + return dns.unpack(dh, msg, off) +} + +// Convert a complete message to a string with dig-like output. +func (dns *Msg) String() string { + if dns == nil { + return " MsgHdr" + } + s := dns.MsgHdr.String() + " " + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if len(dns.Question) > 0 { + s += "\n;; QUESTION SECTION:\n" + for _, r := range dns.Question { + s += r.String() + "\n" + } + } + if len(dns.Answer) > 0 { + s += "\n;; ANSWER SECTION:\n" + for _, r := range dns.Answer { + if r != nil { + s += r.String() + "\n" + } + } + } + if len(dns.Ns) > 0 { + s += "\n;; AUTHORITY SECTION:\n" + for _, r := range dns.Ns { + if r != nil { + s += r.String() + "\n" + } + } + } + if len(dns.Extra) > 0 { + s += "\n;; ADDITIONAL SECTION:\n" + for _, r := range dns.Extra { + if r != nil { + s += r.String() + "\n" + } + } + } + return s +} + +// isCompressible returns whether the msg may be compressible. +func (dns *Msg) isCompressible() bool { + // If we only have one question, there is nothing we can ever compress. + return len(dns.Question) > 1 || len(dns.Answer) > 0 || + len(dns.Ns) > 0 || len(dns.Extra) > 0 +} + +// Len returns the message length when in (un)compressed wire format. +// If dns.Compress is true compression it is taken into account. Len() +// is provided to be a faster way to get the size of the resulting packet, +// than packing it, measuring the size and discarding the buffer. +func (dns *Msg) Len() int { + // If this message can't be compressed, avoid filling the + // compression map and creating garbage. 
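A usage sketch of Len next to Pack, assuming the vendored import path and the package's A record type: Len estimates the wire size without building the packet, and with Compress set both paths account for name compression.

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.Id = dns.Id()
	m.Question = []dns.Question{{Name: "example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET}}
	m.Answer = []dns.RR{
		&dns.A{Hdr: dns.RR_Header{Name: "example.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 300}, A: net.IPv4(192, 0, 2, 1)},
		&dns.A{Hdr: dns.RR_Header{Name: "example.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 300}, A: net.IPv4(192, 0, 2, 2)},
	}
	m.Compress = true

	wire, err := m.Pack()
	if err != nil {
		panic(err)
	}
	// Len reports the expected packed size without packing; for a simple
	// message like this it should agree with the real size.
	fmt.Println(m.Len(), len(wire))
}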
+ if dns.Compress && dns.isCompressible() { + compression := make(map[string]struct{}) + return msgLenWithCompressionMap(dns, compression) + } + + return msgLenWithCompressionMap(dns, nil) +} + +func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int { + l := headerSize + + for _, r := range dns.Question { + l += r.len(l, compression) + } + for _, r := range dns.Answer { + if r != nil { + l += r.len(l, compression) + } + } + for _, r := range dns.Ns { + if r != nil { + l += r.len(l, compression) + } + } + for _, r := range dns.Extra { + if r != nil { + l += r.len(l, compression) + } + } + + return l +} + +func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int { + if s == "" || s == "." { + return 1 + } + + escaped := strings.Contains(s, "\\") + + if compression != nil && (compress || off < maxCompressionOffset) { + // compressionLenSearch will insert the entry into the compression + // map if it doesn't contain it. + if l, ok := compressionLenSearch(compression, s, off); ok && compress { + if escaped { + return escapedNameLen(s[:l]) + 2 + } + + return l + 2 + } + } + + if escaped { + return escapedNameLen(s) + 1 + } + + return len(s) + 1 +} + +func escapedNameLen(s string) int { + nameLen := len(s) + for i := 0; i < len(s); i++ { + if s[i] != '\\' { + continue + } + + if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + nameLen -= 3 + i += 3 + } else { + nameLen-- + i++ + } + } + + return nameLen +} + +func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) { + for off, end := 0, false; !end; off, end = NextLabel(s, off) { + if _, ok := c[s[off:]]; ok { + return off, true + } + + if msgOff+off < maxCompressionOffset { + c[s[off:]] = struct{}{} + } + } + + return 0, false +} + +// Copy returns a new RR which is a deep-copy of r. +func Copy(r RR) RR { return r.copy() } + +// Len returns the length (in octets) of the uncompressed RR in wire format. +func Len(r RR) int { return r.len(0, nil) } + +// Copy returns a new *Msg which is a deep-copy of dns. +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } + +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
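A short usage sketch of the deep copy, assuming the vendored import path: mutating the copy's question does not affect the original message.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	orig := new(dns.Msg)
	orig.Question = []dns.Question{{Name: "example.com.", Qtype: dns.TypeA, Qclass: dns.ClassINET}}

	cp := orig.Copy()
	cp.Question[0].Name = "example.net."

	// The copy owns its own section slices, so the original is untouched.
	fmt.Println(orig.Question[0].Name, cp.Question[0].Name) // example.com. example.net.
}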
+func (dns *Msg) CopyTo(r1 *Msg) *Msg { + r1.MsgHdr = dns.MsgHdr + r1.Compress = dns.Compress + + if len(dns.Question) > 0 { + r1.Question = make([]Question, len(dns.Question)) + copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + } + + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + r1.Answer, rrArr = rrArr[:0:len(dns.Answer)], rrArr[len(dns.Answer):] + r1.Ns, rrArr = rrArr[:0:len(dns.Ns)], rrArr[len(dns.Ns):] + r1.Extra = rrArr[:0:len(dns.Extra)] + + for _, r := range dns.Answer { + r1.Answer = append(r1.Answer, r.copy()) + } + + for _, r := range dns.Ns { + r1.Ns = append(r1.Ns, r.copy()) + } + + for _, r := range dns.Extra { + r1.Extra = append(r1.Extra, r.copy()) + } + + return r1 +} + +func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { + off, err := packDomainName(q.Name, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint16(q.Qtype, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(q.Qclass, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackQuestion(msg []byte, off int) (Question, int, error) { + var ( + q Question + err error + ) + q.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qtype, off, err = unpackUint16(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qclass, off, err = unpackUint16(msg, off) + if off == len(msg) { + return q, off, nil + } + return q, off, err +} + +func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { + off, err := packUint16(dh.Id, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Bits, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Qdcount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Ancount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Nscount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Arcount, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackMsgHdr(msg []byte, off int) (Header, int, error) { + var ( + dh Header + err error + ) + dh.Id, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Bits, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Qdcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Ancount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Nscount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Arcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + return dh, off, nil +} + +// setHdr set the header in the dns using the binary data in dh. +func (dns *Msg) setHdr(dh Header) { + dns.Id = dh.Id + dns.Response = dh.Bits&_QR != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = dh.Bits&_AA != 0 + dns.Truncated = dh.Bits&_TC != 0 + dns.RecursionDesired = dh.Bits&_RD != 0 + dns.RecursionAvailable = dh.Bits&_RA != 0 + dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite. 
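The flag decoding in setHdr follows the RFC 1035 header layout. A standalone sketch of the same bit arithmetic, with the relevant masks mirrored locally because the package's _QR, _RD and _RA constants are unexported:

package main

import "fmt"

// Flag masks mirrored here for illustration; they correspond to the
// unexported constants used by setHdr.
const (
	qr = 1 << 15
	rd = 1 << 8
	ra = 1 << 7
)

func main() {
	bits := uint16(0x8180) // typical flags in a recursive resolver's answer
	fmt.Println(bits&qr != 0, bits&rd != 0, bits&ra != 0) // true true true
	fmt.Println(int(bits>>11)&0xF, int(bits&0xF))         // 0 0 (QUERY, NOERROR)
}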
+ dns.AuthenticatedData = dh.Bits&_AD != 0 + dns.CheckingDisabled = dh.Bits&_CD != 0 + dns.Rcode = int(dh.Bits & 0xF) +} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go new file mode 100644 index 00000000..98fadc31 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -0,0 +1,810 @@ +package dns + +import ( + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "net" + "strings" +) + +// helper functions called from the generated zmsg.go + +// These function are named after the tag to help pack/unpack, if there is no tag it is the name +// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or +// packDataDomainName. + +func unpackDataA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv4len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking a"} + } + a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) + off += net.IPv4len + return a, off, nil +} + +func packDataA(a net.IP, msg []byte, off int) (int, error) { + switch len(a) { + case net.IPv4len, net.IPv6len: + // It must be a slice of 4, even if it is 16, we encode only the first 4 + if off+net.IPv4len > len(msg) { + return len(msg), &Error{err: "overflow packing a"} + } + + copy(msg[off:], a.To4()) + off += net.IPv4len + case 0: + // Allowed, for dynamic updates. + default: + return len(msg), &Error{err: "overflow packing a"} + } + return off, nil +} + +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv6len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking aaaa"} + } + aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) + off += net.IPv6len + return aaaa, off, nil +} + +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { + switch len(aaaa) { + case net.IPv6len: + if off+net.IPv6len > len(msg) { + return len(msg), &Error{err: "overflow packing aaaa"} + } + + copy(msg[off:], aaaa) + off += net.IPv6len + case 0: + // Allowed, dynamic updates. + default: + return len(msg), &Error{err: "overflow packing aaaa"} + } + return off, nil +} + +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a +// re-sliced msg according to the expected length of the RR. +func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { + hdr := RR_Header{} + if off == len(msg) { + return hdr, off, msg, nil + } + + hdr.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rrtype, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Class, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Ttl, off, err = unpackUint32(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rdlength, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) + return hdr, off, msg, err +} + +// packHeader packs an RR header, returning the offset to the end of the header. +// See PackDomainName for documentation about the compression. 
+func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) { + if off == len(msg) { + return off, nil + } + + off, err := packDomainName(hdr.Name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rrtype, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Class, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint32(hdr.Ttl, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR. + if err != nil { + return len(msg), err + } + return off, nil +} + +// helper helper functions. + +// truncateMsgFromRdLength truncates msg to match the expected length of the RR. +// Returns an error if msg is smaller than the expected size. +func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { + lenrd := off + int(rdlength) + if lenrd > len(msg) { + return msg, &Error{err: "overflowing header size"} + } + return msg[:lenrd], nil +} + +var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func fromBase32(s []byte) (buf []byte, err error) { + for i, b := range s { + if b >= 'a' && b <= 'z' { + s[i] = b - 32 + } + } + buflen := base32HexNoPadEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base32HexNoPadEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase32(b []byte) string { + return base32HexNoPadEncoding.EncodeToString(b) +} + +func fromBase64(s []byte) (buf []byte, err error) { + buflen := base64.StdEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base64.StdEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } + +// dynamicUpdate returns true if the Rdlength is zero. 
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 } + +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { + if off+1 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint8"} + } + return msg[off], off + 1, nil +} + +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { + if off+1 > len(msg) { + return len(msg), &Error{err: "overflow packing uint8"} + } + msg[off] = i + return off + 1, nil +} + +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { + if off+2 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint16"} + } + return binary.BigEndian.Uint16(msg[off:]), off + 2, nil +} + +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { + if off+2 > len(msg) { + return len(msg), &Error{err: "overflow packing uint16"} + } + binary.BigEndian.PutUint16(msg[off:], i) + return off + 2, nil +} + +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { + if off+4 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint32"} + } + return binary.BigEndian.Uint32(msg[off:]), off + 4, nil +} + +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { + if off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing uint32"} + } + binary.BigEndian.PutUint32(msg[off:], i) + return off + 4, nil +} + +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { + if off+6 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} + } + // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) + i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]) + off += 6 + return i, off, nil +} + +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { + if off+6 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64 as uint48"} + } + msg[off] = byte(i >> 40) + msg[off+1] = byte(i >> 32) + msg[off+2] = byte(i >> 24) + msg[off+3] = byte(i >> 16) + msg[off+4] = byte(i >> 8) + msg[off+5] = byte(i) + off += 6 + return off, nil +} + +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { + if off+8 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64"} + } + return binary.BigEndian.Uint64(msg[off:]), off + 8, nil +} + +func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { + if off+8 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64"} + } + binary.BigEndian.PutUint64(msg[off:], i) + off += 8 + return off, nil +} + +func unpackString(msg []byte, off int) (string, int, error) { + if off+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + l := int(msg[off]) + off++ + if off+l > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + var s strings.Builder + consumed := 0 + for i, b := range msg[off : off+l] { + switch { + case b == '"' || b == '\\': + if consumed == 0 { + s.Grow(l * 2) + } + s.Write(msg[off+consumed : off+i]) + s.WriteByte('\\') + s.WriteByte(b) + consumed = i + 1 + case b < ' ' || b > '~': // unprintable + if consumed == 0 { + s.Grow(l * 2) + } + s.Write(msg[off+consumed : off+i]) + s.WriteString(escapeByte(b)) + consumed = i + 1 + } + } + if consumed == 0 { // no escaping needed + return string(msg[off : off+l]), off + l, nil + } + s.Write(msg[off+consumed : off+l]) + return s.String(), off + l, nil +} + +func packString(s string, msg []byte, 
off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packTxtString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackStringBase32(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base32"} + } + s := toBase32(msg[off:end]) + return s, end, nil +} + +func packStringBase32(s string, msg []byte, off int) (int, error) { + b32, err := fromBase32([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b32) > len(msg) { + return len(msg), &Error{err: "overflow packing base32"} + } + copy(msg[off:off+len(b32)], b32) + off += len(b32) + return off, nil +} + +func unpackStringBase64(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is base64 encoded value, so we don't need an explicit length + // to be set. Thus far all RR's that have base64 encoded fields have those as their + // last one. What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base64"} + } + s := toBase64(msg[off:end]) + return s, end, nil +} + +func packStringBase64(s string, msg []byte, off int) (int, error) { + b64, err := fromBase64([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b64) > len(msg) { + return len(msg), &Error{err: "overflow packing base64"} + } + copy(msg[off:off+len(b64)], b64) + off += len(b64) + return off, nil +} + +func unpackStringHex(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is hex encoded value, so we don't need an explicit length + // to be set. NSEC and TSIG have hex fields with a length field. + // What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking hex"} + } + + s := hex.EncodeToString(msg[off:end]) + return s, end, nil +} + +func packStringHex(s string, msg []byte, off int) (int, error) { + h, err := hex.DecodeString(s) + if err != nil { + return len(msg), err + } + if off+len(h) > len(msg) { + return len(msg), &Error{err: "overflow packing hex"} + } + copy(msg[off:off+len(h)], h) + off += len(h) + return off, nil +} + +func unpackStringAny(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking anything"} + } + return string(msg[off:end]), end, nil +} + +func packStringAny(s string, msg []byte, off int) (int, error) { + if off+len(s) > len(msg) { + return len(msg), &Error{err: "overflow packing anything"} + } + copy(msg[off:off+len(s)], s) + off += len(s) + return off, nil +} + +func unpackStringTxt(msg []byte, off int) ([]string, int, error) { + txt, off, err := unpackTxt(msg, off) + if err != nil { + return nil, len(msg), err + } + return txt, off, nil +} + +func packStringTxt(s []string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
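For reference while reading packTxt and packTxtString: a simplified sketch of the character-string layout they produce. packTxtSketch is a made-up name; it omits the \DDD unescaping and the 255-byte-per-string limit that the real code enforces.

package main

import "fmt"

// packTxtSketch emits each string as a length byte followed by its bytes,
// the TXT wire layout, without any escape handling.
func packTxtSketch(txt []string) []byte {
	var out []byte
	for _, s := range txt {
		out = append(out, byte(len(s)))
		out = append(out, s...)
	}
	return out
}

func main() {
	fmt.Printf("%% x\n", packTxtSketch([]string{"hi", "there"}))
	// 02 68 69 05 74 68 65 72 65
}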
+ off, err := packTxt(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { + var edns []EDNS0 +Option: + var code uint16 + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code = binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + switch code { + case EDNS0NSID: + e := new(EDNS0_NSID) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0SUBNET: + e := new(EDNS0_SUBNET) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0COOKIE: + e := new(EDNS0_COOKIE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0EXPIRE: + e := new(EDNS0_EXPIRE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0UL: + e := new(EDNS0_UL) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0LLQ: + e := new(EDNS0_LLQ) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DAU: + e := new(EDNS0_DAU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DHU: + e := new(EDNS0_DHU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0N3U: + e := new(EDNS0_N3U) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0PADDING: + e := new(EDNS0_PADDING) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + default: + e := new(EDNS0_LOCAL) + e.Code = code + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + } + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { + for _, el := range options { + b, err := el.pack() + if err != nil || off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing opt"} + } + binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code + binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length + off += 4 + if off+len(b) > len(msg) { + copy(msg[off:], b) + off = len(msg) + continue + } + // Actual data + copy(msg[off:off+len(b)], b) + off += len(b) + } + return off, nil +} + +func unpackStringOctet(msg []byte, off int) (string, int, error) { + s := string(msg[off:]) + return s, len(msg), nil +} + +func packStringOctet(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packOctetString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataNsec(msg []byte, 
off int) ([]uint16, int, error) { + var nsec []uint16 + length, window, lastwindow := 0, 0, -1 + for off < len(msg) { + if off+2 > len(msg) { + return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} + } + window = int(msg[off]) + length = int(msg[off+1]) + off += 2 + if window <= lastwindow { + // RFC 4034: Blocks are present in the NSEC RR RDATA in + // increasing numerical order. + return nsec, len(msg), &Error{err: "out of order NSEC block"} + } + if length == 0 { + // RFC 4034: Blocks with no types present MUST NOT be included. + return nsec, len(msg), &Error{err: "empty NSEC block"} + } + if length > 32 { + return nsec, len(msg), &Error{err: "NSEC block too long"} + } + if off+length > len(msg) { + return nsec, len(msg), &Error{err: "overflowing NSEC block"} + } + + // Walk the bytes in the window and extract the type bits + for j, b := range msg[off : off+length] { + // Check the bits one by one, and set the type + if b&0x80 == 0x80 { + nsec = append(nsec, uint16(window*256+j*8+0)) + } + if b&0x40 == 0x40 { + nsec = append(nsec, uint16(window*256+j*8+1)) + } + if b&0x20 == 0x20 { + nsec = append(nsec, uint16(window*256+j*8+2)) + } + if b&0x10 == 0x10 { + nsec = append(nsec, uint16(window*256+j*8+3)) + } + if b&0x8 == 0x8 { + nsec = append(nsec, uint16(window*256+j*8+4)) + } + if b&0x4 == 0x4 { + nsec = append(nsec, uint16(window*256+j*8+5)) + } + if b&0x2 == 0x2 { + nsec = append(nsec, uint16(window*256+j*8+6)) + } + if b&0x1 == 0x1 { + nsec = append(nsec, uint16(window*256+j*8+7)) + } + } + off += length + lastwindow = window + } + return nsec, off, nil +} + +// typeBitMapLen is a helper function which computes the "maximum" length of +// a the NSEC Type BitMap field. +func typeBitMapLen(bitmap []uint16) int { + var l int + var lastwindow, lastlength uint16 + for _, t := range bitmap { + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + l += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + // packDataNsec would return Error{err: "nsec bits out of order"} here, but + // when computing the length, we want do be liberal. 
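A standalone sketch of the window/octet/bit arithmetic that packDataNsec and typeBitMapLen apply to each type in the bitmap; nsecBit is a made-up helper used only for this illustration.

package main

import "fmt"

// nsecBit sketches the arithmetic used by packDataNsec: type t lives in
// window t/256, in octet (t%256)/8 of that window, at bit 7 - t%8
// (most significant bit first).
func nsecBit(t uint16) (window, octet int, mask byte) {
	return int(t / 256), int(t%256) / 8, 1 << (7 - t%8)
}

func main() {
	w, o, m := nsecBit(1) // TypeA
	fmt.Printf("window=%d octet=%d mask=%#x\n", w, o, m) // window=0 octet=0 mask=0x40
	w, o, m = nsecBit(28) // TypeAAAA
	fmt.Printf("window=%d octet=%d mask=%#x\n", w, o, m) // window=0 octet=3 mask=0x8
}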
+ continue + } + lastwindow, lastlength = window, length + } + l += int(lastlength) + 2 + return l +} + +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { + if len(bitmap) == 0 { + return off, nil + } + var lastwindow, lastlength uint16 + for _, t := range bitmap { + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + off += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + return len(msg), &Error{err: "nsec bits out of order"} + } + if off+2+int(length) > len(msg) { + return len(msg), &Error{err: "overflow packing nsec"} + } + // Setting the window # + msg[off] = byte(window) + // Setting the octets length + msg[off+1] = byte(length) + // Setting the bit value for the type in the right octet + msg[off+1+int(length)] |= byte(1 << (7 - t%8)) + lastwindow, lastlength = window, length + } + off += int(lastlength) + 2 + return off, nil +} + +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { + var ( + servers []string + s string + err error + ) + if end > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking domain names"} + } + for off < end { + s, off, err = UnpackDomainName(msg, off) + if err != nil { + return servers, len(msg), err + } + servers = append(servers, s) + } + return servers, off, nil +} + +func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) { + var err error + for _, name := range names { + off, err = packDomainName(name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + } + return off, nil +} + +func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) { + var err error + for i := range data { + off, err = packDataAplPrefix(&data[i], msg, off) + if err != nil { + return len(msg), err + } + } + return off, nil +} + +func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) { + if len(p.Network.IP) != len(p.Network.Mask) { + return len(msg), &Error{err: "address and mask lengths don't match"} + } + + var err error + prefix, _ := p.Network.Mask.Size() + addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8] + + switch len(p.Network.IP) { + case net.IPv4len: + off, err = packUint16(1, msg, off) + case net.IPv6len: + off, err = packUint16(2, msg, off) + default: + err = &Error{err: "unrecognized address family"} + } + if err != nil { + return len(msg), err + } + + off, err = packUint8(uint8(prefix), msg, off) + if err != nil { + return len(msg), err + } + + var n uint8 + if p.Negation { + n = 0x80 + } + adflen := uint8(len(addr)) & 0x7f + off, err = packUint8(n|adflen, msg, off) + if err != nil { + return len(msg), err + } + + if off+len(addr) > len(msg) { + return len(msg), &Error{err: "overflow packing APL prefix"} + } + off += copy(msg[off:], addr) + + return off, nil +} + +func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) { + var result []APLPrefix + for off < len(msg) { + prefix, end, err := unpackDataAplPrefix(msg, off) + if err != nil { + return nil, len(msg), err + } + off = end + result = append(result, prefix) + } + return result, off, nil +} + +func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) { + family, off, err := unpackUint16(msg, off) + if err != nil { + return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"} + } + prefix, off, err := unpackUint8(msg, off) + if err != nil { + return APLPrefix{}, len(msg), 
&Error{err: "overflow unpacking APL prefix"} + } + nlen, off, err := unpackUint8(msg, off) + if err != nil { + return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"} + } + + var ip []byte + switch family { + case 1: + ip = make([]byte, net.IPv4len) + case 2: + ip = make([]byte, net.IPv6len) + default: + return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"} + } + if int(prefix) > 8*len(ip) { + return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"} + } + + afdlen := int(nlen & 0x7f) + if (int(prefix)+7)/8 != afdlen { + return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"} + } + if off+afdlen > len(msg) { + return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"} + } + off += copy(ip, msg[off:off+afdlen]) + if prefix%8 > 0 { + last := ip[afdlen-1] + zero := uint8(0xff) >> (prefix % 8) + if last&zero > 0 { + return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"} + } + } + + return APLPrefix{ + Negation: (nlen & 0x80) != 0, + Network: net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(prefix), 8*len(ip)), + }, + }, off, nil +} diff --git a/vendor/github.com/miekg/dns/msg_truncate.go b/vendor/github.com/miekg/dns/msg_truncate.go new file mode 100644 index 00000000..89d40757 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_truncate.go @@ -0,0 +1,111 @@ +package dns + +// Truncate ensures the reply message will fit into the requested buffer +// size by removing records that exceed the requested size. +// +// It will first check if the reply fits without compression and then with +// compression. If it won't fit with compression, Truncate then walks the +// record adding as many records as possible without exceeding the +// requested buffer size. +// +// The TC bit will be set if any records were excluded from the message. +// This indicates to that the client should retry over TCP. +// +// According to RFC 2181, the TC bit should only be set if not all of the +// "required" RRs can be included in the response. Unfortunately, we have +// no way of knowing which RRs are required so we set the TC bit if any RR +// had to be omitted from the response. +// +// The appropriate buffer size can be retrieved from the requests OPT +// record, if present, and is transport specific otherwise. dns.MinMsgSize +// should be used for UDP requests without an OPT record, and +// dns.MaxMsgSize for TCP requests without an OPT record. +func (dns *Msg) Truncate(size int) { + if dns.IsTsig() != nil { + // To simplify this implementation, we don't perform + // truncation on responses with a TSIG record. + return + } + + // RFC 6891 mandates that the payload size in an OPT record + // less than 512 bytes must be treated as equal to 512 bytes. + // + // For ease of use, we impose that restriction here. + if size < 512 { + size = 512 + } + + l := msgLenWithCompressionMap(dns, nil) // uncompressed length + if l <= size { + // Don't waste effort compressing this message. + dns.Compress = false + return + } + + dns.Compress = true + + edns0 := dns.popEdns0() + if edns0 != nil { + // Account for the OPT record that gets added at the end, + // by subtracting that length from our budget. + // + // The EDNS(0) OPT record must have the root domain and + // it's length is thus unaffected by compression. 
+ size -= Len(edns0) + } + + compression := make(map[string]struct{}) + + l = headerSize + for _, r := range dns.Question { + l += r.len(l, compression) + } + + var numAnswer int + if l < size { + l, numAnswer = truncateLoop(dns.Answer, size, l, compression) + } + + var numNS int + if l < size { + l, numNS = truncateLoop(dns.Ns, size, l, compression) + } + + var numExtra int + if l < size { + l, numExtra = truncateLoop(dns.Extra, size, l, compression) + } + + // See the function documentation for when we set this. + dns.Truncated = len(dns.Answer) > numAnswer || + len(dns.Ns) > numNS || len(dns.Extra) > numExtra + + dns.Answer = dns.Answer[:numAnswer] + dns.Ns = dns.Ns[:numNS] + dns.Extra = dns.Extra[:numExtra] + + if edns0 != nil { + // Add the OPT record back onto the additional section. + dns.Extra = append(dns.Extra, edns0) + } +} + +func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) { + for i, r := range rrs { + if r == nil { + continue + } + + l += r.len(l, compression) + if l > size { + // Return size, rather than l prior to this record, + // to prevent any further records being added. + return size, i + } + if l == size { + return l, i + 1 + } + } + + return l, len(rrs) +} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go new file mode 100644 index 00000000..8f071a47 --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -0,0 +1,95 @@ +package dns + +import ( + "crypto/sha1" + "encoding/hex" + "strings" +) + +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. +func HashName(label string, ha uint8, iter uint16, salt string) string { + if ha != SHA1 { + return "" + } + + wireSalt := make([]byte, hex.DecodedLen(len(salt))) + n, err := packStringHex(salt, wireSalt, 0) + if err != nil { + return "" + } + wireSalt = wireSalt[:n] + + name := make([]byte, 255) + off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) + if err != nil { + return "" + } + name = name[:off] + + s := sha1.New() + // k = 0 + s.Write(name) + s.Write(wireSalt) + nsec3 := s.Sum(nil) + + // k > 0 + for k := uint16(0); k < iter; k++ { + s.Reset() + s.Write(nsec3) + s.Write(wireSalt) + nsec3 = s.Sum(nsec3[:0]) + } + + return toBase32(nsec3) +} + +// Cover returns true if a name is covered by the NSEC3 record +func (rr *NSEC3) Cover(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + + nextHash := rr.NextDomain + + // if empty interval found, try cover wildcard hashes so nameHash shouldn't match with ownerHash + if ownerHash == nextHash && nameHash != ownerHash { // empty interval + return true + } + if ownerHash > nextHash { // end of zone + if nameHash > ownerHash { // covered since there is nothing after ownerHash + return true + } + return nameHash < nextHash // if nameHash is before beginning of zone it is covered + } + if nameHash < ownerHash { // nameHash is before ownerHash, not covered + return false + } + return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) +} + +// Match returns true if a name matches the NSEC3 record +func (rr *NSEC3) Match(name string) bool { + nameHash := 
HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + if ownerHash == nameHash { + return true + } + return false +} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go new file mode 100644 index 00000000..e28f0663 --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -0,0 +1,114 @@ +package dns + +import "strings" + +// PrivateRdata is an interface used for implementing "Private Use" RR types, see +// RFC 6895. This allows one to experiment with new RR types, without requesting an +// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. +type PrivateRdata interface { + // String returns the text presentaton of the Rdata of the Private RR. + String() string + // Parse parses the Rdata of the private RR. + Parse([]string) error + // Pack is used when packing a private RR into a buffer. + Pack([]byte) (int, error) + // Unpack is used when unpacking a private RR from a buffer. + // TODO(miek): diff. signature than Pack, see edns0.go for instance. + Unpack([]byte) (int, error) + // Copy copies the Rdata into the PrivateRdata argument. + Copy(PrivateRdata) error + // Len returns the length in octets of the Rdata. + Len() int +} + +// PrivateRR represents an RR that uses a PrivateRdata user-defined type. +// It mocks normal RRs and implements dns.RR interface. +type PrivateRR struct { + Hdr RR_Header + Data PrivateRdata + + generator func() PrivateRdata // for copy +} + +// Header return the RR header of r. +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } + +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } + +// Private len and copy parts to satisfy RR interface. +func (r *PrivateRR) len(off int, compression map[string]struct{}) int { + l := r.Hdr.len(off, compression) + l += r.Data.Len() + return l +} + +func (r *PrivateRR) copy() RR { + // make new RR like this: + rr := &PrivateRR{r.Hdr, r.generator(), r.generator} + + if err := r.Data.Copy(rr.Data); err != nil { + panic("dns: got value that could not be used to copy Private rdata: " + err.Error()) + } + + return rr +} + +func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { + n, err := r.Data.Pack(msg[off:]) + if err != nil { + return len(msg), err + } + off += n + return off, nil +} + +func (r *PrivateRR) unpack(msg []byte, off int) (int, error) { + off1, err := r.Data.Unpack(msg[off:]) + off += off1 + return off, err +} + +func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError { + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 +Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l, _ = c.Next(); l.value { + case zNewline, zEOF: + break Fetch + case zString: + text = append(text, l.token) + } + } + + err := r.Data.Parse(text) + if err != nil { + return &ParseError{"", err.Error(), l} + } + + return nil +} + +func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false } + +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. 
+func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) + + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype +} + +// PrivateHandleRemove removes definitions required to support private RR type. +func PrivateHandleRemove(rtype uint16) { + rtypestr, ok := TypeToString[rtype] + if ok { + delete(TypeToRR, rtype) + delete(TypeToString, rtype) + delete(StringToType, rtypestr) + } +} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go new file mode 100644 index 00000000..28151af8 --- /dev/null +++ b/vendor/github.com/miekg/dns/reverse.go @@ -0,0 +1,52 @@ +package dns + +// StringToType is the reverse of TypeToString, needed for string parsing. +var StringToType = reverseInt16(TypeToString) + +// StringToClass is the reverse of ClassToString, needed for string parsing. +var StringToClass = reverseInt16(ClassToString) + +// StringToOpcode is a map of opcodes to strings. +var StringToOpcode = reverseInt(OpcodeToString) + +// StringToRcode is a map of rcodes to strings. +var StringToRcode = reverseInt(RcodeToString) + +func init() { + // Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733. + StringToRcode["NOTIMPL"] = RcodeNotImplemented +} + +// StringToAlgorithm is the reverse of AlgorithmToString. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// StringToHash is a map of names to hash IDs. +var StringToHash = reverseInt8(HashToString) + +// StringToCertType is the reverseof CertTypeToString. +var StringToCertType = reverseInt16(CertTypeToString) + +// Reverse a map +func reverseInt8(m map[uint8]string) map[string]uint8 { + n := make(map[string]uint8, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt16(m map[uint16]string) map[string]uint16 { + n := make(map[string]uint16, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt(m map[int]string) map[string]int { + n := make(map[string]int, len(m)) + for u, s := range m { + n[s] = u + } + return n +} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go new file mode 100644 index 00000000..a638e862 --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -0,0 +1,86 @@ +package dns + +// Dedup removes identical RRs from rrs. It preserves the original ordering. +// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies +// rrs. +// m is used to store the RRs temporary. If it is nil a new map will be allocated. +func Dedup(rrs []RR, m map[string]RR) []RR { + + if m == nil { + m = make(map[string]RR) + } + // Save the keys, so we don't have to call normalizedString twice. + keys := make([]*string, 0, len(rrs)) + + for _, r := range rrs { + key := normalizedString(r) + keys = append(keys, &key) + if mr, ok := m[key]; ok { + // Shortest TTL wins. + rh, mrh := r.Header(), mr.Header() + if mrh.Ttl > rh.Ttl { + mrh.Ttl = rh.Ttl + } + continue + } + + m[key] = r + } + // If the length of the result map equals the amount of RRs we got, + // it means they were all different. We can then just return the original rrset. + if len(m) == len(rrs) { + return rrs + } + + j := 0 + for i, r := range rrs { + // If keys[i] lives in the map, we should copy and remove it. 
+ if _, ok := m[*keys[i]]; ok { + delete(m, *keys[i]) + rrs[j] = r + j++ + } + + if len(m) == 0 { + break + } + } + + return rrs[:j] +} + +// normalizedString returns a normalized string from r. The TTL +// is removed and the domain name is lowercased. We go from this: +// DomainNameTTLCLASSTYPERDATA to: +// lowercasenameCLASSTYPE... +func normalizedString(r RR) string { + // A string Go DNS makes has: domainnameTTL... + b := []byte(r.String()) + + // find the first non-escaped tab, then another, so we capture where the TTL lives. + esc := false + ttlStart, ttlEnd := 0, 0 + for i := 0; i < len(b) && ttlEnd == 0; i++ { + switch { + case b[i] == '\\': + esc = !esc + case b[i] == '\t' && !esc: + if ttlStart == 0 { + ttlStart = i + continue + } + if ttlEnd == 0 { + ttlEnd = i + } + case b[i] >= 'A' && b[i] <= 'Z' && !esc: + b[i] += 32 + default: + esc = false + } + } + + // remove TTL. + copy(b[ttlStart:], b[ttlEnd:]) + cut := ttlEnd - ttlStart + return string(b[:len(b)-cut]) +} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go new file mode 100644 index 00000000..671018b1 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan.go @@ -0,0 +1,1408 @@ +package dns + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" +) + +const maxTok = 2048 // Largest token we can return. + +// The maximum depth of $INCLUDE directives supported by the +// ZoneParser API. +const maxIncludeDepth = 7 + +// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: +// * Add ownernames if they are left blank; +// * Suppress sequences of spaces; +// * Make each RR fit on one line (_NEWLINE is send as last) +// * Handle comments: ; +// * Handle braces - anywhere. +const ( + // Zonefile + zEOF = iota + zString + zBlank + zQuote + zNewline + zRrtpe + zOwner + zClass + zDirOrigin // $ORIGIN + zDirTTL // $TTL + zDirInclude // $INCLUDE + zDirGenerate // $GENERATE + + // Privatekey file + zValue + zKey + + zExpectOwnerDir // Ownername + zExpectOwnerBl // Whitespace after the ownername + zExpectAny // Expect rrtype, ttl or class + zExpectAnyNoClass // Expect rrtype or ttl + zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS + zExpectAnyNoTTL // Expect rrtype or class + zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL + zExpectRrtype // Expect rrtype + zExpectRrtypeBl // Whitespace BEFORE rrtype + zExpectRdata // The first element of the rdata + zExpectDirTTLBl // Space after directive $TTL + zExpectDirTTL // Directive $TTL + zExpectDirOriginBl // Space after directive $ORIGIN + zExpectDirOrigin // Directive $ORIGIN + zExpectDirIncludeBl // Space after directive $INCLUDE + zExpectDirInclude // Directive $INCLUDE + zExpectDirGenerate // Directive $GENERATE + zExpectDirGenerateBl // Space after directive $GENERATE +) + +// ParseError is a parsing error. It contains the parse error and the location in the io.Reader +// where the error occurred. +type ParseError struct { + file string + err string + lex lex +} + +func (e *ParseError) Error() (s string) { + if e.file != "" { + s = e.file + ": " + } + s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) + return +} + +type lex struct { + token string // text of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, _BLANK, etc. 
+ torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + line int // line in the file + column int // column in the file +} + +// Token holds the token that are returned when a zone file is parsed. +type Token struct { + // The scanned resource record when error is not nil. + RR + // When an error occurred, this has the error specifics. + Error *ParseError + // A potential comment positioned after the RR and on the same line. + Comment string +} + +// ttlState describes the state necessary to fill in an omitted RR TTL +type ttlState struct { + ttl uint32 // ttl is the current default TTL + isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive +} + +// NewRR reads the RR contained in the string s. Only the first RR is +// returned. If s contains no records, NewRR will return nil with no +// error. +// +// The class defaults to IN and TTL defaults to 3600. The full zone +// file syntax like $TTL, $ORIGIN, etc. is supported. +// +// All fields of the returned RR are set, except RR.Header().Rdlength +// which is set to 0. +func NewRR(s string) (RR, error) { + if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline + return ReadRR(strings.NewReader(s+"\n"), "") + } + return ReadRR(strings.NewReader(s), "") +} + +// ReadRR reads the RR contained in r. +// +// The string file is used in error reporting and to resolve relative +// $INCLUDE directives. +// +// See NewRR for more documentation. +func ReadRR(r io.Reader, file string) (RR, error) { + zp := NewZoneParser(r, ".", file) + zp.SetDefaultTTL(defaultTtl) + zp.SetIncludeAllowed(true) + rr, _ := zp.Next() + return rr, zp.Err() +} + +// ParseZone reads a RFC 1035 style zonefile from r. It returns +// Tokens on the returned channel, each consisting of either a +// parsed RR and optional comment or a nil RR and an error. The +// channel is closed by ParseZone when the end of r is reached. +// +// The string file is used in error reporting and to resolve relative +// $INCLUDE directives. The string origin is used as the initial +// origin, as if the file would start with an $ORIGIN directive. +// +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all +// supported. Note that $GENERATE's range support up to a maximum of +// of 65535 steps. +// +// Basic usage pattern when reading from a string (z) containing the +// zone data: +// +// for x := range dns.ParseZone(strings.NewReader(z), "", "") { +// if x.Error != nil { +// // log.Println(x.Error) +// } else { +// // Do something with x.RR +// } +// } +// +// Comments specified after an RR (and on the same line!) are +// returned too: +// +// foo. IN A 10.0.0.1 ; this is a comment +// +// The text "; this is comment" is returned in Token.Comment. +// Comments inside the RR are returned concatenated along with the +// RR. Comments on a line by themselves are discarded. +// +// To prevent memory leaks it is important to always fully drain the +// returned channel. If an error occurs, it will always be the last +// Token sent on the channel. +// +// Deprecated: New users should prefer the ZoneParser API. 
+func ParseZone(r io.Reader, origin, file string) chan *Token { + t := make(chan *Token, 10000) + go parseZone(r, origin, file, t) + return t +} + +func parseZone(r io.Reader, origin, file string, t chan *Token) { + defer close(t) + + zp := NewZoneParser(r, origin, file) + zp.SetIncludeAllowed(true) + + for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { + t <- &Token{RR: rr, Comment: zp.Comment()} + } + + if err := zp.Err(); err != nil { + pe, ok := err.(*ParseError) + if !ok { + pe = &ParseError{file: file, err: err.Error()} + } + + t <- &Token{Error: pe} + } +} + +// ZoneParser is a parser for an RFC 1035 style zonefile. +// +// Each parsed RR in the zone is returned sequentially from Next. An +// optional comment can be retrieved with Comment. +// +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all +// supported. Although $INCLUDE is disabled by default. +// Note that $GENERATE's range support up to a maximum of 65535 steps. +// +// Basic usage pattern when reading from a string (z) containing the +// zone data: +// +// zp := NewZoneParser(strings.NewReader(z), "", "") +// +// for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { +// // Do something with rr +// } +// +// if err := zp.Err(); err != nil { +// // log.Println(err) +// } +// +// Comments specified after an RR (and on the same line!) are +// returned too: +// +// foo. IN A 10.0.0.1 ; this is a comment +// +// The text "; this is comment" is returned from Comment. Comments inside +// the RR are returned concatenated along with the RR. Comments on a line +// by themselves are discarded. +type ZoneParser struct { + c *zlexer + + parseErr *ParseError + + origin string + file string + + defttl *ttlState + + h RR_Header + + // sub is used to parse $INCLUDE files and $GENERATE directives. + // Next, by calling subNext, forwards the resulting RRs from this + // sub parser to the calling code. + sub *ZoneParser + osFile *os.File + + includeDepth uint8 + + includeAllowed bool + generateDisallowed bool +} + +// NewZoneParser returns an RFC 1035 style zonefile parser that reads +// from r. +// +// The string file is used in error reporting and to resolve relative +// $INCLUDE directives. The string origin is used as the initial +// origin, as if the file would start with an $ORIGIN directive. +func NewZoneParser(r io.Reader, origin, file string) *ZoneParser { + var pe *ParseError + if origin != "" { + origin = Fqdn(origin) + if _, ok := IsDomainName(origin); !ok { + pe = &ParseError{file, "bad initial origin name", lex{}} + } + } + + return &ZoneParser{ + c: newZLexer(r), + + parseErr: pe, + + origin: origin, + file: file, + } +} + +// SetDefaultTTL sets the parsers default TTL to ttl. +func (zp *ZoneParser) SetDefaultTTL(ttl uint32) { + zp.defttl = &ttlState{ttl, false} +} + +// SetIncludeAllowed controls whether $INCLUDE directives are +// allowed. $INCLUDE directives are not supported by default. +// +// The $INCLUDE directive will open and read from a user controlled +// file on the system. Even if the file is not a valid zonefile, the +// contents of the file may be revealed in error messages, such as: +// +// /etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31 +// /etc/shadow: dns: not a TTL: "root:$6$::0:99999:7:::" at line: 1:125 +func (zp *ZoneParser) SetIncludeAllowed(v bool) { + zp.includeAllowed = v +} + +// Err returns the first non-EOF error that was encountered by the +// ZoneParser. 
+func (zp *ZoneParser) Err() error { + if zp.parseErr != nil { + return zp.parseErr + } + + if zp.sub != nil { + if err := zp.sub.Err(); err != nil { + return err + } + } + + return zp.c.Err() +} + +func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) { + zp.parseErr = &ParseError{zp.file, err, l} + return nil, false +} + +// Comment returns an optional text comment that occurred alongside +// the RR. +func (zp *ZoneParser) Comment() string { + if zp.parseErr != nil { + return "" + } + + if zp.sub != nil { + return zp.sub.Comment() + } + + return zp.c.Comment() +} + +func (zp *ZoneParser) subNext() (RR, bool) { + if rr, ok := zp.sub.Next(); ok { + return rr, true + } + + if zp.sub.osFile != nil { + zp.sub.osFile.Close() + zp.sub.osFile = nil + } + + if zp.sub.Err() != nil { + // We have errors to surface. + return nil, false + } + + zp.sub = nil + return zp.Next() +} + +// Next advances the parser to the next RR in the zonefile and +// returns the (RR, true). It will return (nil, false) when the +// parsing stops, either by reaching the end of the input or an +// error. After Next returns (nil, false), the Err method will return +// any error that occurred during parsing. +func (zp *ZoneParser) Next() (RR, bool) { + if zp.parseErr != nil { + return nil, false + } + if zp.sub != nil { + return zp.subNext() + } + + // 6 possible beginnings of a line (_ is a space): + // + // 0. zRRTYPE -> all omitted until the rrtype + // 1. zOwner _ zRrtype -> class/ttl omitted + // 2. zOwner _ zString _ zRrtype -> class omitted + // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class + // 4. zOwner _ zClass _ zRrtype -> ttl omitted + // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) + // + // After detecting these, we know the zRrtype so we can jump to functions + // handling the rdata for each of these types. 
+ + st := zExpectOwnerDir // initial state + h := &zp.h + + for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() { + // zlexer spotted an error already + if l.err { + return zp.setParseError(l.token, l) + } + + switch st { + case zExpectOwnerDir: + // We can also expect a directive, like $TTL or $ORIGIN + if zp.defttl != nil { + h.Ttl = zp.defttl.ttl + } + + h.Class = ClassINET + + switch l.value { + case zNewline: + st = zExpectOwnerDir + case zOwner: + name, ok := toAbsoluteName(l.token, zp.origin) + if !ok { + return zp.setParseError("bad owner name", l) + } + + h.Name = name + + st = zExpectOwnerBl + case zDirTTL: + st = zExpectDirTTLBl + case zDirOrigin: + st = zExpectDirOriginBl + case zDirInclude: + st = zExpectDirIncludeBl + case zDirGenerate: + st = zExpectDirGenerateBl + case zRrtpe: + h.Rrtype = l.torc + + st = zExpectRdata + case zClass: + h.Class = l.torc + + st = zExpectAnyNoClassBl + case zBlank: + // Discard, can happen when there is nothing on the + // line except the RR type + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + return zp.setParseError("not a TTL", l) + } + + h.Ttl = ttl + + if zp.defttl == nil || !zp.defttl.isByDirective { + zp.defttl = &ttlState{ttl, false} + } + + st = zExpectAnyNoTTLBl + default: + return zp.setParseError("syntax error at beginning", l) + } + case zExpectDirIncludeBl: + if l.value != zBlank { + return zp.setParseError("no blank after $INCLUDE-directive", l) + } + + st = zExpectDirInclude + case zExpectDirInclude: + if l.value != zString { + return zp.setParseError("expecting $INCLUDE value, not this...", l) + } + + neworigin := zp.origin // There may be optionally a new origin set after the filename, if not use current one + switch l, _ := zp.c.Next(); l.value { + case zBlank: + l, _ := zp.c.Next() + if l.value == zString { + name, ok := toAbsoluteName(l.token, zp.origin) + if !ok { + return zp.setParseError("bad origin name", l) + } + + neworigin = name + } + case zNewline, zEOF: + // Ok + default: + return zp.setParseError("garbage after $INCLUDE", l) + } + + if !zp.includeAllowed { + return zp.setParseError("$INCLUDE directive not allowed", l) + } + if zp.includeDepth >= maxIncludeDepth { + return zp.setParseError("too deeply nested $INCLUDE", l) + } + + // Start with the new file + includePath := l.token + if !filepath.IsAbs(includePath) { + includePath = filepath.Join(filepath.Dir(zp.file), includePath) + } + + r1, e1 := os.Open(includePath) + if e1 != nil { + var as string + if !filepath.IsAbs(l.token) { + as = fmt.Sprintf(" as `%s'", includePath) + } + + msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1) + return zp.setParseError(msg, l) + } + + zp.sub = NewZoneParser(r1, neworigin, includePath) + zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1 + zp.sub.SetIncludeAllowed(true) + return zp.subNext() + case zExpectDirTTLBl: + if l.value != zBlank { + return zp.setParseError("no blank after $TTL-directive", l) + } + + st = zExpectDirTTL + case zExpectDirTTL: + if l.value != zString { + return zp.setParseError("expecting $TTL value, not this...", l) + } + + if err := slurpRemainder(zp.c); err != nil { + return zp.setParseError(err.err, err.lex) + } + + ttl, ok := stringToTTL(l.token) + if !ok { + return zp.setParseError("expecting $TTL value, not this...", l) + } + + zp.defttl = &ttlState{ttl, true} + + st = zExpectOwnerDir + case zExpectDirOriginBl: + if l.value != zBlank { + return zp.setParseError("no blank after $ORIGIN-directive", l) + } + + st = zExpectDirOrigin + case 
zExpectDirOrigin: + if l.value != zString { + return zp.setParseError("expecting $ORIGIN value, not this...", l) + } + + if err := slurpRemainder(zp.c); err != nil { + return zp.setParseError(err.err, err.lex) + } + + name, ok := toAbsoluteName(l.token, zp.origin) + if !ok { + return zp.setParseError("bad origin name", l) + } + + zp.origin = name + + st = zExpectOwnerDir + case zExpectDirGenerateBl: + if l.value != zBlank { + return zp.setParseError("no blank after $GENERATE-directive", l) + } + + st = zExpectDirGenerate + case zExpectDirGenerate: + if zp.generateDisallowed { + return zp.setParseError("nested $GENERATE directive not allowed", l) + } + if l.value != zString { + return zp.setParseError("expecting $GENERATE value, not this...", l) + } + + return zp.generate(l) + case zExpectOwnerBl: + if l.value != zBlank { + return zp.setParseError("no blank after owner", l) + } + + st = zExpectAny + case zExpectAny: + switch l.value { + case zRrtpe: + if zp.defttl == nil { + return zp.setParseError("missing TTL with no previous value", l) + } + + h.Rrtype = l.torc + + st = zExpectRdata + case zClass: + h.Class = l.torc + + st = zExpectAnyNoClassBl + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + return zp.setParseError("not a TTL", l) + } + + h.Ttl = ttl + + if zp.defttl == nil || !zp.defttl.isByDirective { + zp.defttl = &ttlState{ttl, false} + } + + st = zExpectAnyNoTTLBl + default: + return zp.setParseError("expecting RR type, TTL or class, not this...", l) + } + case zExpectAnyNoClassBl: + if l.value != zBlank { + return zp.setParseError("no blank before class", l) + } + + st = zExpectAnyNoClass + case zExpectAnyNoTTLBl: + if l.value != zBlank { + return zp.setParseError("no blank before TTL", l) + } + + st = zExpectAnyNoTTL + case zExpectAnyNoTTL: + switch l.value { + case zClass: + h.Class = l.torc + + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + + st = zExpectRdata + default: + return zp.setParseError("expecting RR type or class, not this...", l) + } + case zExpectAnyNoClass: + switch l.value { + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + return zp.setParseError("not a TTL", l) + } + + h.Ttl = ttl + + if zp.defttl == nil || !zp.defttl.isByDirective { + zp.defttl = &ttlState{ttl, false} + } + + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + + st = zExpectRdata + default: + return zp.setParseError("expecting RR type or TTL, not this...", l) + } + case zExpectRrtypeBl: + if l.value != zBlank { + return zp.setParseError("no blank before RR type", l) + } + + st = zExpectRrtype + case zExpectRrtype: + if l.value != zRrtpe { + return zp.setParseError("unknown RR type", l) + } + + h.Rrtype = l.torc + + st = zExpectRdata + case zExpectRdata: + var rr RR + if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) { + rr = newFn() + *rr.Header() = *h + } else { + rr = &RFC3597{Hdr: *h} + } + + _, isPrivate := rr.(*PrivateRR) + if !isPrivate && zp.c.Peek().token == "" { + // This is a dynamic update rr. + + // TODO(tmthrgd): Previously slurpRemainder was only called + // for certain RR types, which may have been important. + if err := slurpRemainder(zp.c); err != nil { + return zp.setParseError(err.err, err.lex) + } + + return rr, true + } else if l.value == zNewline { + return zp.setParseError("unexpected newline", l) + } + + if err := rr.parse(zp.c, zp.origin); err != nil { + // err is a concrete *ParseError without the file field set. + // The setParseError call below will construct a new + // *ParseError with file set to zp.file. 
+ + // If err.lex is nil than we have encounter an unknown RR type + // in that case we substitute our current lex token. + if err.lex == (lex{}) { + return zp.setParseError(err.err, l) + } + + return zp.setParseError(err.err, err.lex) + } + + return rr, true + } + } + + // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this + // is not an error, because an empty zone file is still a zone file. + return nil, false +} + +// canParseAsRR returns true if the record type can be parsed as a +// concrete RR. It blacklists certain record types that must be parsed +// according to RFC 3597 because they lack a presentation format. +func canParseAsRR(rrtype uint16) bool { + switch rrtype { + case TypeANY, TypeNULL, TypeOPT, TypeTSIG: + return false + default: + return true + } +} + +type zlexer struct { + br io.ByteReader + + readErr error + + line int + column int + + comBuf string + comment string + + l lex + cachedL *lex + + brace int + quote bool + space bool + commt bool + rrtype bool + owner bool + + nextL bool + + eol bool // end-of-line +} + +func newZLexer(r io.Reader) *zlexer { + br, ok := r.(io.ByteReader) + if !ok { + br = bufio.NewReaderSize(r, 1024) + } + + return &zlexer{ + br: br, + + line: 1, + + owner: true, + } +} + +func (zl *zlexer) Err() error { + if zl.readErr == io.EOF { + return nil + } + + return zl.readErr +} + +// readByte returns the next byte from the input +func (zl *zlexer) readByte() (byte, bool) { + if zl.readErr != nil { + return 0, false + } + + c, err := zl.br.ReadByte() + if err != nil { + zl.readErr = err + return 0, false + } + + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. + if zl.eol { + zl.line++ + zl.column = 0 + zl.eol = false + } + + if c == '\n' { + zl.eol = true + } else { + zl.column++ + } + + return c, true +} + +func (zl *zlexer) Peek() lex { + if zl.nextL { + return zl.l + } + + l, ok := zl.Next() + if !ok { + return l + } + + if zl.nextL { + // Cache l. Next returns zl.cachedL then zl.l. + zl.cachedL = &l + } else { + // In this case l == zl.l, so we just tell Next to return zl.l. + zl.nextL = true + } + + return l +} + +func (zl *zlexer) Next() (lex, bool) { + l := &zl.l + switch { + case zl.cachedL != nil: + l, zl.cachedL = zl.cachedL, nil + return *l, true + case zl.nextL: + zl.nextL = false + return *l, true + case l.err: + // Parsing errors should be sticky. + return lex{value: zEOF}, false + } + + var ( + str [maxTok]byte // Hold string text + com [maxTok]byte // Hold comment text + + stri int // Offset in str (0 means empty) + comi int // Offset in com (0 means empty) + + escape bool + ) + + if zl.comBuf != "" { + comi = copy(com[:], zl.comBuf) + zl.comBuf = "" + } + + zl.comment = "" + + for x, ok := zl.readByte(); ok; x, ok = zl.readByte() { + l.line, l.column = zl.line, zl.column + + if stri >= len(str) { + l.token = "token length insufficient for parsing" + l.err = true + return *l, true + } + if comi >= len(com) { + l.token = "comment length insufficient for parsing" + l.err = true + return *l, true + } + + switch x { + case ' ', '\t': + if escape || zl.quote { + // Inside quotes or escaped this is legal. 
+ str[stri] = x + stri++ + + escape = false + break + } + + if zl.commt { + com[comi] = x + comi++ + break + } + + var retL lex + if stri == 0 { + // Space directly in the beginning, handled in the grammar + } else if zl.owner { + // If we have a string and its the first, make it an owner + l.value = zOwner + l.token = string(str[:stri]) + + // escape $... start with a \ not a $, so this will work + switch strings.ToUpper(l.token) { + case "$TTL": + l.value = zDirTTL + case "$ORIGIN": + l.value = zDirOrigin + case "$INCLUDE": + l.value = zDirInclude + case "$GENERATE": + l.value = zDirGenerate + } + + retL = *l + } else { + l.value = zString + l.token = string(str[:stri]) + + if !zl.rrtype { + tokenUpper := strings.ToUpper(l.token) + if t, ok := StringToType[tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + + zl.rrtype = true + } else if strings.HasPrefix(tokenUpper, "TYPE") { + t, ok := typeToInt(l.token) + if !ok { + l.token = "unknown RR type" + l.err = true + return *l, true + } + + l.value = zRrtpe + l.torc = t + + zl.rrtype = true + } + + if t, ok := StringToClass[tokenUpper]; ok { + l.value = zClass + l.torc = t + } else if strings.HasPrefix(tokenUpper, "CLASS") { + t, ok := classToInt(l.token) + if !ok { + l.token = "unknown class" + l.err = true + return *l, true + } + + l.value = zClass + l.torc = t + } + } + + retL = *l + } + + zl.owner = false + + if !zl.space { + zl.space = true + + l.value = zBlank + l.token = " " + + if retL == (lex{}) { + return *l, true + } + + zl.nextL = true + } + + if retL != (lex{}) { + return retL, true + } + case ';': + if escape || zl.quote { + // Inside quotes or escaped this is legal. + str[stri] = x + stri++ + + escape = false + break + } + + zl.commt = true + zl.comBuf = "" + + if comi > 1 { + // A newline was previously seen inside a comment that + // was inside braces and we delayed adding it until now. 
+ com[comi] = ' ' // convert newline to space + comi++ + if comi >= len(com) { + l.token = "comment length insufficient for parsing" + l.err = true + return *l, true + } + } + + com[comi] = ';' + comi++ + + if stri > 0 { + zl.comBuf = string(com[:comi]) + + l.value = zString + l.token = string(str[:stri]) + return *l, true + } + case '\r': + escape = false + + if zl.quote { + str[stri] = x + stri++ + } + + // discard if outside of quotes + case '\n': + escape = false + + // Escaped newline + if zl.quote { + str[stri] = x + stri++ + break + } + + if zl.commt { + // Reset a comment + zl.commt = false + zl.rrtype = false + + // If not in a brace this ends the comment AND the RR + if zl.brace == 0 { + zl.owner = true + + l.value = zNewline + l.token = "\n" + zl.comment = string(com[:comi]) + return *l, true + } + + zl.comBuf = string(com[:comi]) + break + } + + if zl.brace == 0 { + // If there is previous text, we should output it here + var retL lex + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + + if !zl.rrtype { + tokenUpper := strings.ToUpper(l.token) + if t, ok := StringToType[tokenUpper]; ok { + zl.rrtype = true + + l.value = zRrtpe + l.torc = t + } + } + + retL = *l + } + + l.value = zNewline + l.token = "\n" + + zl.comment = zl.comBuf + zl.comBuf = "" + zl.rrtype = false + zl.owner = true + + if retL != (lex{}) { + zl.nextL = true + return retL, true + } + + return *l, true + } + case '\\': + // comments do not get escaped chars, everything is copied + if zl.commt { + com[comi] = x + comi++ + break + } + + // something already escaped must be in string + if escape { + str[stri] = x + stri++ + + escape = false + break + } + + // something escaped outside of string gets added to string + str[stri] = x + stri++ + + escape = true + case '"': + if zl.commt { + com[comi] = x + comi++ + break + } + + if escape { + str[stri] = x + stri++ + + escape = false + break + } + + zl.space = false + + // send previous gathered text and the quote + var retL lex + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + + retL = *l + } + + // send quote itself as separate token + l.value = zQuote + l.token = "\"" + + zl.quote = !zl.quote + + if retL != (lex{}) { + zl.nextL = true + return retL, true + } + + return *l, true + case '(', ')': + if zl.commt { + com[comi] = x + comi++ + break + } + + if escape || zl.quote { + // Inside quotes or escaped this is legal. + str[stri] = x + stri++ + + escape = false + break + } + + switch x { + case ')': + zl.brace-- + + if zl.brace < 0 { + l.token = "extra closing brace" + l.err = true + return *l, true + } + case '(': + zl.brace++ + } + default: + escape = false + + if zl.commt { + com[comi] = x + comi++ + break + } + + str[stri] = x + stri++ + + zl.space = false + } + } + + if zl.readErr != nil && zl.readErr != io.EOF { + // Don't return any tokens after a read error occurs. 
+ return lex{value: zEOF}, false + } + + var retL lex + if stri > 0 { + // Send remainder of str + l.value = zString + l.token = string(str[:stri]) + retL = *l + + if comi <= 0 { + return retL, true + } + } + + if comi > 0 { + // Send remainder of com + l.value = zNewline + l.token = "\n" + zl.comment = string(com[:comi]) + + if retL != (lex{}) { + zl.nextL = true + return retL, true + } + + return *l, true + } + + if zl.brace != 0 { + l.token = "unbalanced brace" + l.err = true + return *l, true + } + + return lex{value: zEOF}, false +} + +func (zl *zlexer) Comment() string { + if zl.l.err { + return "" + } + + return zl.comment +} + +// Extract the class number from CLASSxx +func classToInt(token string) (uint16, bool) { + offset := 5 + if len(token) < offset+1 { + return 0, false + } + class, err := strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(class), true +} + +// Extract the rr number from TYPExxx +func typeToInt(token string) (uint16, bool) { + offset := 4 + if len(token) < offset+1 { + return 0, false + } + typ, err := strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(typ), true +} + +// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds. +func stringToTTL(token string) (uint32, bool) { + var s, i uint32 + for _, c := range token { + switch c { + case 's', 'S': + s += i + i = 0 + case 'm', 'M': + s += i * 60 + i = 0 + case 'h', 'H': + s += i * 60 * 60 + i = 0 + case 'd', 'D': + s += i * 60 * 60 * 24 + i = 0 + case 'w', 'W': + s += i * 60 * 60 * 24 * 7 + i = 0 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i *= 10 + i += uint32(c) - '0' + default: + return 0, false + } + } + return s + i, true +} + +// Parse LOC records' [.][mM] into a +// mantissa exponent format. Token should contain the entire +// string (i.e. no spaces allowed) +func stringToCm(token string) (e, m uint8, ok bool) { + if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { + token = token[0 : len(token)-1] + } + s := strings.SplitN(token, ".", 2) + var meters, cmeters, val int + var err error + switch len(s) { + case 2: + if cmeters, err = strconv.Atoi(s[1]); err != nil { + return + } + fallthrough + case 1: + if meters, err = strconv.Atoi(s[0]); err != nil { + return + } + case 0: + // huh? + return 0, 0, false + } + ok = true + if meters > 0 { + e = 2 + val = meters + } else { + e = 0 + val = cmeters + } + for val > 10 { + e++ + val /= 10 + } + if e > 9 { + ok = false + } + m = uint8(val) + return +} + +func toAbsoluteName(name, origin string) (absolute string, ok bool) { + // check for an explicit origin reference + if name == "@" { + // require a nonempty origin + if origin == "" { + return "", false + } + return origin, true + } + + // require a valid domain name + _, ok = IsDomainName(name) + if !ok || name == "" { + return "", false + } + + // check if name is already absolute + if IsFqdn(name) { + return name, true + } + + // require a nonempty origin + if origin == "" { + return "", false + } + return appendOrigin(name, origin), true +} + +func appendOrigin(name, origin string) string { + if origin == "." { + return name + origin + } + return name + "." 
+ origin +} + +// LOC record helper function +func locCheckNorth(token string, latitude uint32) (uint32, bool) { + switch token { + case "n", "N": + return LOC_EQUATOR + latitude, true + case "s", "S": + return LOC_EQUATOR - latitude, true + } + return latitude, false +} + +// LOC record helper function +func locCheckEast(token string, longitude uint32) (uint32, bool) { + switch token { + case "e", "E": + return LOC_EQUATOR + longitude, true + case "w", "W": + return LOC_EQUATOR - longitude, true + } + return longitude, false +} + +// "Eat" the rest of the "line" +func slurpRemainder(c *zlexer) *ParseError { + l, _ := c.Next() + switch l.value { + case zBlank: + l, _ = c.Next() + if l.value != zNewline && l.value != zEOF { + return &ParseError{"", "garbage after rdata", l} + } + case zNewline: + case zEOF: + default: + return &ParseError{"", "garbage after rdata", l} + } + return nil +} + +// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" +// Used for NID and L64 record. +func stringToNodeID(l lex) (uint64, *ParseError) { + if len(l.token) < 19 { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + // There must be three colons at fixes postitions, if not its a parse error + if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + return u, nil +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go new file mode 100644 index 00000000..6c37b2e2 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -0,0 +1,1764 @@ +package dns + +import ( + "encoding/base64" + "net" + "strconv" + "strings" +) + +// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) +// or an error +func endingToString(c *zlexer, errstr string) (string, *ParseError) { + var s string + l, _ := c.Next() // zString + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{"", errstr, l} + } + switch l.value { + case zString: + s += l.token + case zBlank: // Ok + default: + return "", &ParseError{"", errstr, l} + } + l, _ = c.Next() + } + + return s, nil +} + +// A remainder of the rdata with embedded spaces, split on unquoted whitespace +// and return the parsed string slice or an error +func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { + // Get the remaining data until we see a zNewline + l, _ := c.Next() + if l.err { + return nil, &ParseError{"", errstr, l} + } + + // Build the slice + s := make([]string, 0) + quote := false + empty := false + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{"", errstr, l} + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + + } + p, i = p+255, i+255 + } + s = append(s, sx...) + break + } + + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. 
+ return nil, &ParseError{"", errstr, l} + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: + return nil, &ParseError{"", errstr, l} + } + l, _ = c.Next() + } + + if quote { + return nil, &ParseError{"", errstr, l} + } + + return s, nil +} + +func (rr *A) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + rr.A = net.ParseIP(l.token) + // IPv4 addresses cannot include ":". + // We do this rather than use net.IP's To4() because + // To4() treats IPv4-mapped IPv6 addresses as being + // IPv4. + isIPv4 := !strings.Contains(l.token, ":") + if rr.A == nil || !isIPv4 || l.err { + return &ParseError{"", "bad A A", l} + } + return slurpRemainder(c) +} + +func (rr *AAAA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + rr.AAAA = net.ParseIP(l.token) + // IPv6 addresses must include ":", and IPv4 + // addresses cannot include ":". + isIPv6 := strings.Contains(l.token, ":") + if rr.AAAA == nil || !isIPv6 || l.err { + return &ParseError{"", "bad AAAA AAAA", l} + } + return slurpRemainder(c) +} + +func (rr *NS) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad NS Ns", l} + } + rr.Ns = name + return slurpRemainder(c) +} + +func (rr *PTR) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad PTR Ptr", l} + } + rr.Ptr = name + return slurpRemainder(c) +} + +func (rr *NSAPPTR) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad NSAP-PTR Ptr", l} + } + rr.Ptr = name + return slurpRemainder(c) +} + +func (rr *RP) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + mbox, mboxOk := toAbsoluteName(l.token, o) + if l.err || !mboxOk { + return &ParseError{"", "bad RP Mbox", l} + } + rr.Mbox = mbox + + c.Next() // zBlank + l, _ = c.Next() + rr.Txt = l.token + + txt, txtOk := toAbsoluteName(l.token, o) + if l.err || !txtOk { + return &ParseError{"", "bad RP Txt", l} + } + rr.Txt = txt + + return slurpRemainder(c) +} + +func (rr *MR) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MR Mr", l} + } + rr.Mr = name + return slurpRemainder(c) +} + +func (rr *MB) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MB Mb", l} + } + rr.Mb = name + return slurpRemainder(c) +} + +func (rr *MG) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MG Mg", l} + } + rr.Mg = name + return slurpRemainder(c) +} + +func (rr *HINFO) parse(c *zlexer, o string) *ParseError { + chunks, e := endingToTxtSlice(c, "bad HINFO Fields") + if e != nil { + return e + } + + if ln := len(chunks); ln == 0 { + return nil + } else if ln == 1 { + // Can we split it? 
+ if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Cpu = chunks[0] + rr.Os = strings.Join(chunks[1:], " ") + + return nil +} + +func (rr *MINFO) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + rmail, rmailOk := toAbsoluteName(l.token, o) + if l.err || !rmailOk { + return &ParseError{"", "bad MINFO Rmail", l} + } + rr.Rmail = rmail + + c.Next() // zBlank + l, _ = c.Next() + rr.Email = l.token + + email, emailOk := toAbsoluteName(l.token, o) + if l.err || !emailOk { + return &ParseError{"", "bad MINFO Email", l} + } + rr.Email = email + + return slurpRemainder(c) +} + +func (rr *MF) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MF Mf", l} + } + rr.Mf = name + return slurpRemainder(c) +} + +func (rr *MD) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MD Md", l} + } + rr.Md = name + return slurpRemainder(c) +} + +func (rr *MX) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad MX Pref", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Mx = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad MX Mx", l} + } + rr.Mx = name + + return slurpRemainder(c) +} + +func (rr *RT) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil { + return &ParseError{"", "bad RT Preference", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Host = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad RT Host", l} + } + rr.Host = name + + return slurpRemainder(c) +} + +func (rr *AFSDB) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad AFSDB Subtype", l} + } + rr.Subtype = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Hostname = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad AFSDB Hostname", l} + } + rr.Hostname = name + return slurpRemainder(c) +} + +func (rr *X25) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if l.err { + return &ParseError{"", "bad X25 PSDNAddress", l} + } + rr.PSDNAddress = l.token + return slurpRemainder(c) +} + +func (rr *KX) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad KX Pref", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Exchanger = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad KX Exchanger", l} + } + rr.Exchanger = name + return slurpRemainder(c) +} + +func (rr *CNAME) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad CNAME Target", l} + } + rr.Target = name + return slurpRemainder(c) +} + +func (rr *DNAME) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := 
toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad DNAME Target", l} + } + rr.Target = name + return slurpRemainder(c) +} + +func (rr *SOA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + ns, nsOk := toAbsoluteName(l.token, o) + if l.err || !nsOk { + return &ParseError{"", "bad SOA Ns", l} + } + rr.Ns = ns + + c.Next() // zBlank + l, _ = c.Next() + rr.Mbox = l.token + + mbox, mboxOk := toAbsoluteName(l.token, o) + if l.err || !mboxOk { + return &ParseError{"", "bad SOA Mbox", l} + } + rr.Mbox = mbox + + c.Next() // zBlank + + var ( + v uint32 + ok bool + ) + for i := 0; i < 5; i++ { + l, _ = c.Next() + if l.err { + return &ParseError{"", "bad SOA zone parameter", l} + } + if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { + if i == 0 { + // Serial must be a number + return &ParseError{"", "bad SOA zone parameter", l} + } + // We allow other fields to be unitful duration strings + if v, ok = stringToTTL(l.token); !ok { + return &ParseError{"", "bad SOA zone parameter", l} + + } + } else { + v = uint32(j) + } + switch i { + case 0: + rr.Serial = v + c.Next() // zBlank + case 1: + rr.Refresh = v + c.Next() // zBlank + case 2: + rr.Retry = v + c.Next() // zBlank + case 3: + rr.Expire = v + c.Next() // zBlank + case 4: + rr.Minttl = v + } + } + return slurpRemainder(c) +} + +func (rr *SRV) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad SRV Priority", l} + } + rr.Priority = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad SRV Weight", l} + } + rr.Weight = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad SRV Port", l} + } + rr.Port = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Target = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad SRV Target", l} + } + rr.Target = name + return slurpRemainder(c) +} + +func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NAPTR Order", l} + } + rr.Order = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NAPTR Preference", l} + } + rr.Preference = uint16(i) + + // Flags + c.Next() // zBlank + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Flags", l} + } + l, _ = c.Next() // Either String or Quote + if l.value == zString { + rr.Flags = l.token + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Flags", l} + } + } else if l.value == zQuote { + rr.Flags = "" + } else { + return &ParseError{"", "bad NAPTR Flags", l} + } + + // Service + c.Next() // zBlank + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Service", l} + } + l, _ = c.Next() // Either String or Quote + if l.value == zString { + rr.Service = l.token + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Service", l} + } + } else if l.value == zQuote { + rr.Service = "" + } else { + return &ParseError{"", "bad NAPTR Service", l} + } + + // Regexp + c.Next() // 
zBlank + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Regexp", l} + } + l, _ = c.Next() // Either String or Quote + if l.value == zString { + rr.Regexp = l.token + l, _ = c.Next() // _QUOTE + if l.value != zQuote { + return &ParseError{"", "bad NAPTR Regexp", l} + } + } else if l.value == zQuote { + rr.Regexp = "" + } else { + return &ParseError{"", "bad NAPTR Regexp", l} + } + + // After quote no space?? + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Replacement = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad NAPTR Replacement", l} + } + rr.Replacement = name + return slurpRemainder(c) +} + +func (rr *TALINK) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + previousName, previousNameOk := toAbsoluteName(l.token, o) + if l.err || !previousNameOk { + return &ParseError{"", "bad TALINK PreviousName", l} + } + rr.PreviousName = previousName + + c.Next() // zBlank + l, _ = c.Next() + rr.NextName = l.token + + nextName, nextNameOk := toAbsoluteName(l.token, o) + if l.err || !nextNameOk { + return &ParseError{"", "bad TALINK NextName", l} + } + rr.NextName = nextName + + return slurpRemainder(c) +} + +func (rr *LOC) parse(c *zlexer, o string) *ParseError { + // Non zero defaults for LOC record, see RFC 1876, Section 3. + rr.HorizPre = 165 // 10000 + rr.VertPre = 162 // 10 + rr.Size = 18 // 1 + ok := false + + // North + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return &ParseError{"", "bad LOC Latitude", l} + } + rr.Latitude = 1000 * 60 * 60 * uint32(i) + + c.Next() // zBlank + // Either number, 'N' or 'S' + l, _ = c.Next() + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + i, e = strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return &ParseError{"", "bad LOC Latitude minutes", l} + } + rr.Latitude += 1000 * 60 * uint32(i) + + c.Next() // zBlank + l, _ = c.Next() + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return &ParseError{"", "bad LOC Latitude seconds", l} + } else { + rr.Latitude += uint32(1000 * i) + } + c.Next() // zBlank + // Either number, 'N' or 'S' + l, _ = c.Next() + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + // If still alive, flag an error + return &ParseError{"", "bad LOC Latitude North/South", l} + +East: + // East + c.Next() // zBlank + l, _ = c.Next() + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return &ParseError{"", "bad LOC Longitude", l} + } else { + rr.Longitude = 1000 * 60 * 60 * uint32(i) + } + c.Next() // zBlank + // Either number, 'E' or 'W' + l, _ = c.Next() + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return &ParseError{"", "bad LOC Longitude minutes", l} + } else { + rr.Longitude += 1000 * 60 * uint32(i) + } + c.Next() // zBlank + l, _ = c.Next() + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return &ParseError{"", "bad LOC Longitude seconds", l} + } else { + rr.Longitude += uint32(1000 * i) + } + c.Next() // zBlank + // Either number, 'E' or 'W' + l, _ = c.Next() + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + // If still alive, flag an error + return &ParseError{"", "bad LOC Longitude East/West", l} + +Altitude: + c.Next() // zBlank + l, _ = c.Next() + if len(l.token) == 0 || l.err { + return 
&ParseError{"", "bad LOC Altitude", l} + } + if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { + l.token = l.token[0 : len(l.token)-1] + } + if i, e := strconv.ParseFloat(l.token, 32); e != nil { + return &ParseError{"", "bad LOC Altitude", l} + } else { + rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) + } + + // And now optionally the other values + l, _ = c.Next() + count := 0 + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + switch count { + case 0: // Size + e, m, ok := stringToCm(l.token) + if !ok { + return &ParseError{"", "bad LOC Size", l} + } + rr.Size = e&0x0f | m<<4&0xf0 + case 1: // HorizPre + e, m, ok := stringToCm(l.token) + if !ok { + return &ParseError{"", "bad LOC HorizPre", l} + } + rr.HorizPre = e&0x0f | m<<4&0xf0 + case 2: // VertPre + e, m, ok := stringToCm(l.token) + if !ok { + return &ParseError{"", "bad LOC VertPre", l} + } + rr.VertPre = e&0x0f | m<<4&0xf0 + } + count++ + case zBlank: + // Ok + default: + return &ParseError{"", "bad LOC Size, HorizPre or VertPre", l} + } + l, _ = c.Next() + } + return nil +} + +func (rr *HIP) parse(c *zlexer, o string) *ParseError { + // HitLength is not represented + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad HIP PublicKeyAlgorithm", l} + } + rr.PublicKeyAlgorithm = uint8(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad HIP Hit", l} + } + rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. + rr.HitLength = uint8(len(rr.Hit)) / 2 + + c.Next() // zBlank + l, _ = c.Next() // zString + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad HIP PublicKey", l} + } + rr.PublicKey = l.token // This cannot contain spaces + rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + + // RendezvousServers (if any) + l, _ = c.Next() + var xs []string + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad HIP RendezvousServers", l} + } + xs = append(xs, name) + case zBlank: + // Ok + default: + return &ParseError{"", "bad HIP RendezvousServers", l} + } + l, _ = c.Next() + } + + rr.RendezvousServers = xs + return nil +} + +func (rr *CERT) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if v, ok := StringToCertType[l.token]; ok { + rr.Type = v + } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { + return &ParseError{"", "bad CERT Type", l} + } else { + rr.Type = uint16(i) + } + c.Next() // zBlank + l, _ = c.Next() // zString + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad CERT KeyTag", l} + } + rr.KeyTag = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + if v, ok := StringToAlgorithm[l.token]; ok { + rr.Algorithm = v + } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + return &ParseError{"", "bad CERT Algorithm", l} + } else { + rr.Algorithm = uint8(i) + } + s, e1 := endingToString(c, "bad CERT Certificate") + if e1 != nil { + return e1 + } + rr.Certificate = s + return nil +} + +func (rr *OPENPGPKEY) parse(c *zlexer, o string) *ParseError { + s, e := endingToString(c, "bad OPENPGPKEY PublicKey") + if e != nil { + return e + } + rr.PublicKey = s + return nil +} + +func (rr *CSYNC) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + j, e := 
strconv.ParseUint(l.token, 10, 32) + if e != nil { + // Serial must be a number + return &ParseError{"", "bad CSYNC serial", l} + } + rr.Serial = uint32(j) + + c.Next() // zBlank + + l, _ = c.Next() + j, e = strconv.ParseUint(l.token, 10, 16) + if e != nil { + // Serial must be a number + return &ParseError{"", "bad CSYNC flags", l} + } + rr.Flags = uint16(j) + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l, _ = c.Next() + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { + return &ParseError{"", "bad CSYNC TypeBitMap", l} + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return &ParseError{"", "bad CSYNC TypeBitMap", l} + } + l, _ = c.Next() + } + return nil +} + +func (rr *SIG) parse(c *zlexer, o string) *ParseError { + return rr.RRSIG.parse(c, o) +} + +func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + tokenUpper := strings.ToUpper(l.token) + if t, ok := StringToType[tokenUpper]; !ok { + if strings.HasPrefix(tokenUpper, "TYPE") { + t, ok = typeToInt(l.token) + if !ok { + return &ParseError{"", "bad RRSIG Typecovered", l} + } + rr.TypeCovered = t + } else { + return &ParseError{"", "bad RRSIG Typecovered", l} + } + } else { + rr.TypeCovered = t + } + + c.Next() // zBlank + l, _ = c.Next() + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad RRSIG Algorithm", l} + } + rr.Algorithm = uint8(i) + + c.Next() // zBlank + l, _ = c.Next() + i, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad RRSIG Labels", l} + } + rr.Labels = uint8(i) + + c.Next() // zBlank + l, _ = c.Next() + i, err = strconv.ParseUint(l.token, 10, 32) + if err != nil || l.err { + return &ParseError{"", "bad RRSIG OrigTtl", l} + } + rr.OrigTtl = uint32(i) + + c.Next() // zBlank + l, _ = c.Next() + if i, err := StringToTime(l.token); err != nil { + // Try to see if all numeric and use it as epoch + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + // TODO(miek): error out on > MAX_UINT32, same below + rr.Expiration = uint32(i) + } else { + return &ParseError{"", "bad RRSIG Expiration", l} + } + } else { + rr.Expiration = i + } + + c.Next() // zBlank + l, _ = c.Next() + if i, err := StringToTime(l.token); err != nil { + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + rr.Inception = uint32(i) + } else { + return &ParseError{"", "bad RRSIG Inception", l} + } + } else { + rr.Inception = i + } + + c.Next() // zBlank + l, _ = c.Next() + i, err = strconv.ParseUint(l.token, 10, 16) + if err != nil || l.err { + return &ParseError{"", "bad RRSIG KeyTag", l} + } + rr.KeyTag = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() + rr.SignerName = l.token + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad RRSIG SignerName", l} + } + rr.SignerName = name + + s, e := endingToString(c, "bad RRSIG Signature") + if e != nil { + return e + } + rr.Signature = s + + return nil +} + +func (rr *NSEC) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad NSEC NextDomain", l} + } + rr.NextDomain = name + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l, _ = c.Next() + for l.value != zNewline && l.value != zEOF { + 
switch l.value { + case zBlank: + // Ok + case zString: + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { + return &ParseError{"", "bad NSEC TypeBitMap", l} + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return &ParseError{"", "bad NSEC TypeBitMap", l} + } + l, _ = c.Next() + } + return nil +} + +func (rr *NSEC3) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3 Hash", l} + } + rr.Hash = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3 Flags", l} + } + rr.Flags = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3 Iterations", l} + } + rr.Iterations = uint16(i) + c.Next() + l, _ = c.Next() + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad NSEC3 Salt", l} + } + if l.token != "-" { + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token + } + + c.Next() + l, _ = c.Next() + if len(l.token) == 0 || l.err { + return &ParseError{"", "bad NSEC3 NextDomain", l} + } + rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) + rr.NextDomain = l.token + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l, _ = c.Next() + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { + return &ParseError{"", "bad NSEC3 TypeBitMap", l} + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return &ParseError{"", "bad NSEC3 TypeBitMap", l} + } + l, _ = c.Next() + } + return nil +} + +func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3PARAM Hash", l} + } + rr.Hash = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3PARAM Flags", l} + } + rr.Flags = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NSEC3PARAM Iterations", l} + } + rr.Iterations = uint16(i) + c.Next() + l, _ = c.Next() + if l.token != "-" { + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token + } + return slurpRemainder(c) +} + +func (rr *EUI48) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if len(l.token) != 17 || l.err { + return &ParseError{"", "bad EUI48 Address", l} + } + addr := make([]byte, 12) + dash := 0 + for i := 0; i < 10; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return &ParseError{"", "bad EUI48 Address", l} + } + } + addr[10] = l.token[15] + addr[11] = l.token[16] + + i, e := strconv.ParseUint(string(addr), 16, 48) + if e != nil { + return &ParseError{"", "bad EUI48 Address", l} + } + rr.Address = i + return slurpRemainder(c) +} + +func (rr *EUI64) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if len(l.token) != 23 || l.err { + return &ParseError{"", "bad EUI64 Address", l} + } + addr := make([]byte, 16) + dash := 0 + for i := 0; i < 14; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] 
= l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return &ParseError{"", "bad EUI64 Address", l} + } + } + addr[14] = l.token[21] + addr[15] = l.token[22] + + i, e := strconv.ParseUint(string(addr), 16, 64) + if e != nil { + return &ParseError{"", "bad EUI68 Address", l} + } + rr.Address = i + return slurpRemainder(c) +} + +func (rr *SSHFP) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SSHFP Algorithm", l} + } + rr.Algorithm = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SSHFP Type", l} + } + rr.Type = uint8(i) + c.Next() // zBlank + s, e1 := endingToString(c, "bad SSHFP Fingerprint") + if e1 != nil { + return e1 + } + rr.FingerPrint = s + return nil +} + +func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " Flags", l} + } + rr.Flags = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " Protocol", l} + } + rr.Protocol = uint8(i) + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " Algorithm", l} + } + rr.Algorithm = uint8(i) + s, e1 := endingToString(c, "bad "+typ+" PublicKey") + if e1 != nil { + return e1 + } + rr.PublicKey = s + return nil +} + +func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError { + return rr.parseDNSKEY(c, o, "DNSKEY") +} + +func (rr *KEY) parse(c *zlexer, o string) *ParseError { + return rr.parseDNSKEY(c, o, "KEY") +} + +func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError { + return rr.parseDNSKEY(c, o, "CDNSKEY") +} + +func (rr *RKEY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad RKEY Flags", l} + } + rr.Flags = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad RKEY Protocol", l} + } + rr.Protocol = uint8(i) + c.Next() // zBlank + l, _ = c.Next() // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad RKEY Algorithm", l} + } + rr.Algorithm = uint8(i) + s, e1 := endingToString(c, "bad RKEY PublicKey") + if e1 != nil { + return e1 + } + rr.PublicKey = s + return nil +} + +func (rr *EID) parse(c *zlexer, o string) *ParseError { + s, e := endingToString(c, "bad EID Endpoint") + if e != nil { + return e + } + rr.Endpoint = s + return nil +} + +func (rr *NIMLOC) parse(c *zlexer, o string) *ParseError { + s, e := endingToString(c, "bad NIMLOC Locator") + if e != nil { + return e + } + rr.Locator = s + return nil +} + +func (rr *GPOS) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + _, e := strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return &ParseError{"", "bad GPOS Longitude", l} + } + rr.Longitude = l.token + c.Next() // zBlank + l, _ = c.Next() + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return &ParseError{"", "bad GPOS Latitude", l} + } + rr.Latitude = l.token + c.Next() // zBlank + l, _ = c.Next() + _, e = strconv.ParseFloat(l.token, 64) + if e != 
nil || l.err { + return &ParseError{"", "bad GPOS Altitude", l} + } + rr.Altitude = l.token + return slurpRemainder(c) +} + +func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " KeyTag", l} + } + rr.KeyTag = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { + tokenUpper := strings.ToUpper(l.token) + i, ok := StringToAlgorithm[tokenUpper] + if !ok || l.err { + return &ParseError{"", "bad " + typ + " Algorithm", l} + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad " + typ + " DigestType", l} + } + rr.DigestType = uint8(i) + s, e1 := endingToString(c, "bad "+typ+" Digest") + if e1 != nil { + return e1 + } + rr.Digest = s + return nil +} + +func (rr *DS) parse(c *zlexer, o string) *ParseError { + return rr.parseDS(c, o, "DS") +} + +func (rr *DLV) parse(c *zlexer, o string) *ParseError { + return rr.parseDS(c, o, "DLV") +} + +func (rr *CDS) parse(c *zlexer, o string) *ParseError { + return rr.parseDS(c, o, "CDS") +} + +func (rr *TA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad TA KeyTag", l} + } + rr.KeyTag = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + tokenUpper := strings.ToUpper(l.token) + i, ok := StringToAlgorithm[tokenUpper] + if !ok || l.err { + return &ParseError{"", "bad TA Algorithm", l} + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad TA DigestType", l} + } + rr.DigestType = uint8(i) + s, err := endingToString(c, "bad TA Digest") + if err != nil { + return err + } + rr.Digest = s + return nil +} + +func (rr *TLSA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad TLSA Usage", l} + } + rr.Usage = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad TLSA Selector", l} + } + rr.Selector = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad TLSA MatchingType", l} + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2 := endingToString(c, "bad TLSA Certificate") + if e2 != nil { + return e2 + } + rr.Certificate = s + return nil +} + +func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SMIMEA Usage", l} + } + rr.Usage = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SMIMEA Selector", l} + } + rr.Selector = uint8(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return &ParseError{"", "bad SMIMEA MatchingType", l} + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. 
different than e), because...??t + s, e2 := endingToString(c, "bad SMIMEA Certificate") + if e2 != nil { + return e2 + } + rr.Certificate = s + return nil +} + +func (rr *RFC3597) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + if l.token != "\\#" { + return &ParseError{"", "bad RFC3597 Rdata", l} + } + + c.Next() // zBlank + l, _ = c.Next() + rdlength, e := strconv.Atoi(l.token) + if e != nil || l.err { + return &ParseError{"", "bad RFC3597 Rdata ", l} + } + + s, e1 := endingToString(c, "bad RFC3597 Rdata") + if e1 != nil { + return e1 + } + if rdlength*2 != len(s) { + return &ParseError{"", "bad RFC3597 Rdata", l} + } + rr.Rdata = s + return nil +} + +func (rr *SPF) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad SPF Txt") + if e != nil { + return e + } + rr.Txt = s + return nil +} + +func (rr *AVC) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad AVC Txt") + if e != nil { + return e + } + rr.Txt = s + return nil +} + +func (rr *TXT) parse(c *zlexer, o string) *ParseError { + // no zBlank reading here, because all this rdata is TXT + s, e := endingToTxtSlice(c, "bad TXT Txt") + if e != nil { + return e + } + rr.Txt = s + return nil +} + +// identical to setTXT +func (rr *NINFO) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad NINFO ZSData") + if e != nil { + return e + } + rr.ZSData = s + return nil +} + +func (rr *URI) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad URI Priority", l} + } + rr.Priority = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad URI Weight", l} + } + rr.Weight = uint16(i) + + c.Next() // zBlank + s, err := endingToTxtSlice(c, "bad URI Target") + if err != nil { + return err + } + if len(s) != 1 { + return &ParseError{"", "bad URI Target", l} + } + rr.Target = s[0] + return nil +} + +func (rr *DHCID) parse(c *zlexer, o string) *ParseError { + // awesome record to parse! 
+ s, e := endingToString(c, "bad DHCID Digest") + if e != nil { + return e + } + rr.Digest = s + return nil +} + +func (rr *NID) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad NID Preference", l} + } + rr.Preference = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return err + } + rr.NodeID = u + return slurpRemainder(c) +} + +func (rr *L32) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad L32 Preference", l} + } + rr.Preference = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Locator32 = net.ParseIP(l.token) + if rr.Locator32 == nil || l.err { + return &ParseError{"", "bad L32 Locator", l} + } + return slurpRemainder(c) +} + +func (rr *LP) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad LP Preference", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Fqdn = l.token + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{"", "bad LP Fqdn", l} + } + rr.Fqdn = name + + return slurpRemainder(c) +} + +func (rr *L64) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad L64 Preference", l} + } + rr.Preference = uint16(i) + c.Next() // zBlank + l, _ = c.Next() // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return err + } + rr.Locator64 = u + return slurpRemainder(c) +} + +func (rr *UID) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return &ParseError{"", "bad UID Uid", l} + } + rr.Uid = uint32(i) + return slurpRemainder(c) +} + +func (rr *GID) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return &ParseError{"", "bad GID Gid", l} + } + rr.Gid = uint32(i) + return slurpRemainder(c) +} + +func (rr *UINFO) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad UINFO Uinfo") + if e != nil { + return e + } + if ln := len(s); ln == 0 { + return nil + } + rr.Uinfo = s[0] // silently discard anything after the first character-string + return nil +} + +func (rr *PX) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{"", "bad PX Preference", l} + } + rr.Preference = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Map822 = l.token + map822, map822Ok := toAbsoluteName(l.token, o) + if l.err || !map822Ok { + return &ParseError{"", "bad PX Map822", l} + } + rr.Map822 = map822 + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Mapx400 = l.token + mapx400, mapx400Ok := toAbsoluteName(l.token, o) + if l.err || !mapx400Ok { + return &ParseError{"", "bad PX Mapx400", l} + } + rr.Mapx400 = mapx400 + + return slurpRemainder(c) +} + +func (rr *CAA) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad CAA Flag", l} + } + rr.Flag = uint8(i) + + c.Next() // zBlank + l, _ = c.Next() 
// zString + if l.value != zString { + return &ParseError{"", "bad CAA Tag", l} + } + rr.Tag = l.token + + c.Next() // zBlank + s, e := endingToTxtSlice(c, "bad CAA Value") + if e != nil { + return e + } + if len(s) != 1 { + return &ParseError{"", "bad CAA Value", l} + } + rr.Value = s[0] + return nil +} + +func (rr *TKEY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + + // Algorithm + if l.value != zString { + return &ParseError{"", "bad TKEY algorithm", l} + } + rr.Algorithm = l.token + c.Next() // zBlank + + // Get the key length and key values + l, _ = c.Next() + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad TKEY key length", l} + } + rr.KeySize = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + if l.value != zString { + return &ParseError{"", "bad TKEY key", l} + } + rr.Key = l.token + c.Next() // zBlank + + // Get the otherdata length and string data + l, _ = c.Next() + i, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad TKEY otherdata length", l} + } + rr.OtherLen = uint16(i) + c.Next() // zBlank + l, _ = c.Next() + if l.value != zString { + return &ParseError{"", "bad TKEY otherday", l} + } + rr.OtherData = l.token + + return nil +} + +func (rr *APL) parse(c *zlexer, o string) *ParseError { + var prefixes []APLPrefix + + for { + l, _ := c.Next() + if l.value == zNewline || l.value == zEOF { + break + } + if l.value == zBlank && prefixes != nil { + continue + } + if l.value != zString { + return &ParseError{"", "unexpected APL field", l} + } + + // Expected format: [!]afi:address/prefix + + colon := strings.IndexByte(l.token, ':') + if colon == -1 { + return &ParseError{"", "missing colon in APL field", l} + } + + family, cidr := l.token[:colon], l.token[colon+1:] + + var negation bool + if family != "" && family[0] == '!' { + negation = true + family = family[1:] + } + + afi, err := strconv.ParseUint(family, 10, 16) + if err != nil { + return &ParseError{"", "failed to parse APL family: " + err.Error(), l} + } + var addrLen int + switch afi { + case 1: + addrLen = net.IPv4len + case 2: + addrLen = net.IPv6len + default: + return &ParseError{"", "unrecognized APL family", l} + } + + ip, subnet, err := net.ParseCIDR(cidr) + if err != nil { + return &ParseError{"", "failed to parse APL address: " + err.Error(), l} + } + if !ip.Equal(subnet.IP) { + return &ParseError{"", "extra bits in APL address", l} + } + + if len(subnet.IP) != addrLen { + return &ParseError{"", "address mismatch with the APL family", l} + } + + prefixes = append(prefixes, APLPrefix{ + Negation: negation, + Network: *subnet, + }) + } + + rr.Prefixes = prefixes + return nil +} diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/miekg/dns/serve_mux.go new file mode 100644 index 00000000..69deb33e --- /dev/null +++ b/vendor/github.com/miekg/dns/serve_mux.go @@ -0,0 +1,123 @@ +package dns + +import ( + "strings" + "sync" +) + +// ServeMux is an DNS request multiplexer. It matches the zone name of +// each incoming request against a list of registered patterns add calls +// the handler for the pattern that most closely matches the zone name. +// +// ServeMux is DNSSEC aware, meaning that queries for the DS record are +// redirected to the parent zone (if that is also registered), otherwise +// the child gets the query. +// +// ServeMux is also safe for concurrent access from multiple goroutines. +// +// The zero ServeMux is empty and ready for use. 
+type ServeMux struct { + z map[string]Handler + m sync.RWMutex +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { + return new(ServeMux) +} + +// DefaultServeMux is the default ServeMux used by Serve. +var DefaultServeMux = NewServeMux() + +func (mux *ServeMux) match(q string, t uint16) Handler { + mux.m.RLock() + defer mux.m.RUnlock() + if mux.z == nil { + return nil + } + + q = strings.ToLower(q) + + var handler Handler + for off, end := 0, false; !end; off, end = NextLabel(q, off) { + if h, ok := mux.z[q[off:]]; ok { + if t != TypeDS { + return h + } + // Continue for DS to see if we have a parent too, if so delegate to the parent + handler = h + } + } + + // Wildcard match, if we have found nothing try the root zone as a last resort. + if h, ok := mux.z["."]; ok { + return h + } + + return handler +} + +// Handle adds a handler to the ServeMux for pattern. +func (mux *ServeMux) Handle(pattern string, handler Handler) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + if mux.z == nil { + mux.z = make(map[string]Handler) + } + mux.z[Fqdn(pattern)] = handler + mux.m.Unlock() +} + +// HandleFunc adds a handler function to the ServeMux for pattern. +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + mux.Handle(pattern, HandlerFunc(handler)) +} + +// HandleRemove deregisters the handler specific for pattern from the ServeMux. +func (mux *ServeMux) HandleRemove(pattern string) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + delete(mux.z, Fqdn(pattern)) + mux.m.Unlock() +} + +// ServeDNS dispatches the request to the handler whose pattern most +// closely matches the request message. +// +// ServeDNS is DNSSEC aware, meaning that queries for the DS record +// are redirected to the parent zone (if that is also registered), +// otherwise the child gets the query. +// +// If no handler is found, or there is no question, a standard SERVFAIL +// message is returned +func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) { + var h Handler + if len(req.Question) >= 1 { // allow more than one question + h = mux.match(req.Question[0].Name, req.Question[0].Qtype) + } + + if h != nil { + h.ServeDNS(w, req) + } else { + HandleFailed(w, req) + } +} + +// Handle registers the handler with the given pattern +// in the DefaultServeMux. The documentation for +// ServeMux explains how patterns are matched. +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } + +// HandleRemove deregisters the handle with the given pattern +// in the DefaultServeMux. +func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } + +// HandleFunc registers the handler function with the given pattern +// in the DefaultServeMux. +func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + DefaultServeMux.HandleFunc(pattern, handler) +} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go new file mode 100644 index 00000000..3cf1a024 --- /dev/null +++ b/vendor/github.com/miekg/dns/server.go @@ -0,0 +1,764 @@ +// DNS server implementation. + +package dns + +import ( + "context" + "crypto/tls" + "encoding/binary" + "errors" + "io" + "net" + "strings" + "sync" + "time" +) + +// Default maximum number of TCP queries before we close the socket. +const maxTCPQueries = 128 + +// aLongTimeAgo is a non-zero time, far in the past, used for +// immediate cancelation of network operations. 
+var aLongTimeAgo = time.Unix(1, 0) + +// Handler is implemented by any value that implements ServeDNS. +type Handler interface { + ServeDNS(w ResponseWriter, r *Msg) +} + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as DNS handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. +type HandlerFunc func(ResponseWriter, *Msg) + +// ServeDNS calls f(w, r). +func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { + f(w, r) +} + +// A ResponseWriter interface is used by an DNS handler to +// construct an DNS response. +type ResponseWriter interface { + // LocalAddr returns the net.Addr of the server + LocalAddr() net.Addr + // RemoteAddr returns the net.Addr of the client that sent the current request. + RemoteAddr() net.Addr + // WriteMsg writes a reply back to the client. + WriteMsg(*Msg) error + // Write writes a raw buffer back to the client. + Write([]byte) (int, error) + // Close closes the connection. + Close() error + // TsigStatus returns the status of the Tsig. + TsigStatus() error + // TsigTimersOnly sets the tsig timers only boolean. + TsigTimersOnly(bool) + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the DNS package will not do anything with the connection. + Hijack() +} + +// A ConnectionStater interface is used by a DNS Handler to access TLS connection state +// when available. +type ConnectionStater interface { + ConnectionState() *tls.ConnectionState +} + +type response struct { + closed bool // connection has been closed + hijacked bool // connection has been hijacked by handler + tsigTimersOnly bool + tsigStatus error + tsigRequestMAC string + tsigSecret map[string]string // the tsig secrets + udp *net.UDPConn // i/o connection if UDP was used + tcp net.Conn // i/o connection if TCP was used + udpSession *SessionUDP // oob data to get egress interface right + writer Writer // writer to output the raw DNS bits +} + +// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. +func HandleFailed(w ResponseWriter, r *Msg) { + m := new(Msg) + m.SetRcode(r, RcodeServerFailure) + // does not matter if this write fails + w.WriteMsg(m) +} + +// ListenAndServe Starts a server on address and network specified Invoke handler +// for incoming queries. +func ListenAndServe(addr string, network string, handler Handler) error { + server := &Server{Addr: addr, Net: network, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in +// http://golang.org/pkg/net/http/#ListenAndServeTLS +func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + server := &Server{ + Addr: addr, + Net: "tcp-tls", + TLSConfig: &config, + Handler: handler, + } + + return server.ListenAndServe() +} + +// ActivateAndServe activates a server with a listener from systemd, +// l and p should not both be non-nil. +// If both l and p are not nil only p will be used. +// Invoke handler for incoming queries. +func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { + server := &Server{Listener: l, PacketConn: p, Handler: handler} + return server.ActivateAndServe() +} + +// Writer writes raw DNS messages; each call to Write should send an entire message. 
+type Writer interface { + io.Writer +} + +// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. +type Reader interface { + // ReadTCP reads a raw message from a TCP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) + // ReadUDP reads a raw message from a UDP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) +} + +// defaultReader is an adapter for the Server struct that implements the Reader interface +// using the readTCP and readUDP func of the embedded Server. +type defaultReader struct { + *Server +} + +func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + return dr.readTCP(conn, timeout) +} + +func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + return dr.readUDP(conn, timeout) +} + +// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. +// Implementations should never return a nil Reader. +type DecorateReader func(Reader) Reader + +// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. +// Implementations should never return a nil Writer. +type DecorateWriter func(Writer) Writer + +// A Server defines parameters for running an DNS server. +type Server struct { + // Address to listen on, ":dns" if empty. + Addr string + // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one + Net string + // TCP Listener to use, this is to aid in systemd's socket activation. + Listener net.Listener + // TLS connection configuration + TLSConfig *tls.Config + // UDP "Listener" to use, this is to aid in systemd's socket activation. + PacketConn net.PacketConn + // Handler to invoke, dns.DefaultServeMux if nil. + Handler Handler + // Default buffer size to use to read incoming UDP messages. If not set + // it defaults to MinMsgSize (512 B). + UDPSize int + // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. + ReadTimeout time.Duration + // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. + WriteTimeout time.Duration + // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). + IdleTimeout func() time.Duration + // Secret(s) for Tsig map[]. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2). + TsigSecret map[string]string + // If NotifyStartedFunc is set it is called once the server has started listening. + NotifyStartedFunc func() + // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + DecorateReader DecorateReader + // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. + DecorateWriter DecorateWriter + // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). + MaxTCPQueries int + // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. + // It is only supported on go1.11+ and when using ListenAndServe. + ReusePort bool + // AcceptMsgFunc will check the incoming message and will reject it early in the process. + // By default DefaultMsgAcceptFunc will be used. 
+ MsgAcceptFunc MsgAcceptFunc + + // Shutdown handling + lock sync.RWMutex + started bool + shutdown chan struct{} + conns map[net.Conn]struct{} + + // A pool for UDP message buffers. + udpPool sync.Pool +} + +func (srv *Server) isStarted() bool { + srv.lock.RLock() + started := srv.started + srv.lock.RUnlock() + return started +} + +func makeUDPBuffer(size int) func() interface{} { + return func() interface{} { + return make([]byte, size) + } +} + +func (srv *Server) init() { + srv.shutdown = make(chan struct{}) + srv.conns = make(map[net.Conn]struct{}) + + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + if srv.MsgAcceptFunc == nil { + srv.MsgAcceptFunc = DefaultMsgAcceptFunc + } + if srv.Handler == nil { + srv.Handler = DefaultServeMux + } + + srv.udpPool.New = makeUDPBuffer(srv.UDPSize) +} + +func unlockOnce(l sync.Locker) func() { + var once sync.Once + return func() { once.Do(l.Unlock) } +} + +// ListenAndServe starts a nameserver on the configured address in *Server. +func (srv *Server) ListenAndServe() error { + unlock := unlockOnce(&srv.lock) + srv.lock.Lock() + defer unlock() + + if srv.started { + return &Error{err: "server already started"} + } + + addr := srv.Addr + if addr == "" { + addr = ":domain" + } + + srv.init() + + switch srv.Net { + case "tcp", "tcp4", "tcp6": + l, err := listenTCP(srv.Net, addr, srv.ReusePort) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + unlock() + return srv.serveTCP(l) + case "tcp-tls", "tcp4-tls", "tcp6-tls": + if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) { + return errors.New("dns: neither Certificates nor GetCertificate set in Config") + } + network := strings.TrimSuffix(srv.Net, "-tls") + l, err := listenTCP(network, addr, srv.ReusePort) + if err != nil { + return err + } + l = tls.NewListener(l, srv.TLSConfig) + srv.Listener = l + srv.started = true + unlock() + return srv.serveTCP(l) + case "udp", "udp4", "udp6": + l, err := listenUDP(srv.Net, addr, srv.ReusePort) + if err != nil { + return err + } + u := l.(*net.UDPConn) + if e := setUDPSocketOptions(u); e != nil { + return e + } + srv.PacketConn = l + srv.started = true + unlock() + return srv.serveUDP(u) + } + return &Error{err: "bad network"} +} + +// ActivateAndServe starts a nameserver with the PacketConn or Listener +// configured in *Server. Its main use is to start a server from systemd. +func (srv *Server) ActivateAndServe() error { + unlock := unlockOnce(&srv.lock) + srv.lock.Lock() + defer unlock() + + if srv.started { + return &Error{err: "server already started"} + } + + srv.init() + + pConn := srv.PacketConn + l := srv.Listener + if pConn != nil { + // Check PacketConn interface's type is valid and value + // is not nil + if t, ok := pConn.(*net.UDPConn); ok && t != nil { + if e := setUDPSocketOptions(t); e != nil { + return e + } + srv.started = true + unlock() + return srv.serveUDP(t) + } + } + if l != nil { + srv.started = true + unlock() + return srv.serveTCP(l) + } + return &Error{err: "bad listeners"} +} + +// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. +func (srv *Server) Shutdown() error { + return srv.ShutdownContext(context.Background()) +} + +// ShutdownContext shuts down a server. After a call to ShutdownContext, +// ListenAndServe and ActivateAndServe will return. +// +// A context.Context may be passed to limit how long to wait for connections +// to terminate. 
+func (srv *Server) ShutdownContext(ctx context.Context) error { + srv.lock.Lock() + if !srv.started { + srv.lock.Unlock() + return &Error{err: "server not started"} + } + + srv.started = false + + if srv.PacketConn != nil { + srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads + } + + if srv.Listener != nil { + srv.Listener.Close() + } + + for rw := range srv.conns { + rw.SetReadDeadline(aLongTimeAgo) // Unblock reads + } + + srv.lock.Unlock() + + if testShutdownNotify != nil { + testShutdownNotify.Broadcast() + } + + var ctxErr error + select { + case <-srv.shutdown: + case <-ctx.Done(): + ctxErr = ctx.Err() + } + + if srv.PacketConn != nil { + srv.PacketConn.Close() + } + + return ctxErr +} + +var testShutdownNotify *sync.Cond + +// getReadTimeout is a helper func to use system timeout if server did not intend to change it. +func (srv *Server) getReadTimeout() time.Duration { + if srv.ReadTimeout != 0 { + return srv.ReadTimeout + } + return dnsTimeout +} + +// serveTCP starts a TCP listener for the server. +func (srv *Server) serveTCP(l net.Listener) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + var wg sync.WaitGroup + defer func() { + wg.Wait() + close(srv.shutdown) + }() + + for srv.isStarted() { + rw, err := l.Accept() + if err != nil { + if !srv.isStarted() { + return nil + } + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + srv.lock.Lock() + // Track the connection to allow unblocking reads on shutdown. + srv.conns[rw] = struct{}{} + srv.lock.Unlock() + wg.Add(1) + go srv.serveTCPConn(&wg, rw) + } + + return nil +} + +// serveUDP starts a UDP listener for the server. +func (srv *Server) serveUDP(l *net.UDPConn) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + var wg sync.WaitGroup + defer func() { + wg.Wait() + close(srv.shutdown) + }() + + rtimeout := srv.getReadTimeout() + // deadline is not used here + for srv.isStarted() { + m, s, err := reader.ReadUDP(l, rtimeout) + if err != nil { + if !srv.isStarted() { + return nil + } + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + continue + } + return err + } + if len(m) < headerSize { + if cap(m) == srv.UDPSize { + srv.udpPool.Put(m[:srv.UDPSize]) + } + continue + } + wg.Add(1) + go srv.serveUDPPacket(&wg, m, l, s) + } + + return nil +} + +// Serve a new TCP connection. +func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) { + w := &response{tsigSecret: srv.TsigSecret, tcp: rw} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + reader := Reader(defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + + timeout := srv.getReadTimeout() + + limit := srv.MaxTCPQueries + if limit == 0 { + limit = maxTCPQueries + } + + for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ { + m, err := reader.ReadTCP(w.tcp, timeout) + if err != nil { + // TODO(tmthrgd): handle error + break + } + srv.serveDNS(m, w) + if w.closed { + break // Close() was called + } + if w.hijacked { + break // client will call Close() themselves + } + // The first read uses the read timeout, the rest use the + // idle timeout. 
+ timeout = idleTimeout + } + + if !w.hijacked { + w.Close() + } + + srv.lock.Lock() + delete(srv.conns, w.tcp) + srv.lock.Unlock() + + wg.Done() +} + +// Serve a new UDP request. +func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) { + w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + srv.serveDNS(m, w) + wg.Done() +} + +func (srv *Server) serveDNS(m []byte, w *response) { + dh, off, err := unpackMsgHdr(m, 0) + if err != nil { + // Let client hang, they are sending crap; any reply can be used to amplify. + return + } + + req := new(Msg) + req.setHdr(dh) + + switch action := srv.MsgAcceptFunc(dh); action { + case MsgAccept: + if req.unpack(dh, m, off) == nil { + break + } + + fallthrough + case MsgReject, MsgRejectNotImplemented: + opcode := req.Opcode + req.SetRcodeFormatError(req) + req.Zero = false + if action == MsgRejectNotImplemented { + req.Opcode = opcode + req.Rcode = RcodeNotImplemented + } + + // Are we allowed to delete any OPT records here? + req.Ns, req.Answer, req.Extra = nil, nil, nil + + w.WriteMsg(req) + fallthrough + case MsgIgnore: + if w.udp != nil && cap(m) == srv.UDPSize { + srv.udpPool.Put(m[:srv.UDPSize]) + } + + return + } + + w.tsigStatus = nil + if w.tsigSecret != nil { + if t := req.IsTsig(); t != nil { + if secret, ok := w.tsigSecret[t.Hdr.Name]; ok { + w.tsigStatus = TsigVerify(m, secret, "", false) + } else { + w.tsigStatus = ErrSecret + } + w.tsigTimersOnly = false + w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC + } + } + + if w.udp != nil && cap(m) == srv.UDPSize { + srv.udpPool.Put(m[:srv.UDPSize]) + } + + srv.Handler.ServeDNS(w, req) // Writes back to the client +} + +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + // If we race with ShutdownContext, the read deadline may + // have been set in the distant past to unblock the read + // below. We must not override it, otherwise we may block + // ShutdownContext. + srv.lock.RLock() + if srv.started { + conn.SetReadDeadline(time.Now().Add(timeout)) + } + srv.lock.RUnlock() + + var length uint16 + if err := binary.Read(conn, binary.BigEndian, &length); err != nil { + return nil, err + } + + m := make([]byte, length) + if _, err := io.ReadFull(conn, m); err != nil { + return nil, err + } + + return m, nil +} + +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + srv.lock.RLock() + if srv.started { + // See the comment in readTCP above. + conn.SetReadDeadline(time.Now().Add(timeout)) + } + srv.lock.RUnlock() + + m := srv.udpPool.Get().([]byte) + n, s, err := ReadFromSessionUDP(conn, m) + if err != nil { + srv.udpPool.Put(m) + return nil, nil, err + } + m = m[:n] + return m, s, nil +} + +// WriteMsg implements the ResponseWriter.WriteMsg method. 
+func (w *response) WriteMsg(m *Msg) (err error) { + if w.closed { + return &Error{err: "WriteMsg called after Close"} + } + + var data []byte + if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) + if t := m.IsTsig(); t != nil { + data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err + } + } + data, err = m.Pack() + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err +} + +// Write implements the ResponseWriter.Write method. +func (w *response) Write(m []byte) (int, error) { + if w.closed { + return 0, &Error{err: "Write called after Close"} + } + + switch { + case w.udp != nil: + return WriteToSessionUDP(w.udp, m, w.udpSession) + case w.tcp != nil: + if len(m) > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + + l := make([]byte, 2) + binary.BigEndian.PutUint16(l, uint16(len(m))) + + n, err := (&net.Buffers{l, m}).WriteTo(w.tcp) + return int(n), err + default: + panic("dns: internal error: udp and tcp both nil") + } +} + +// LocalAddr implements the ResponseWriter.LocalAddr method. +func (w *response) LocalAddr() net.Addr { + switch { + case w.udp != nil: + return w.udp.LocalAddr() + case w.tcp != nil: + return w.tcp.LocalAddr() + default: + panic("dns: internal error: udp and tcp both nil") + } +} + +// RemoteAddr implements the ResponseWriter.RemoteAddr method. +func (w *response) RemoteAddr() net.Addr { + switch { + case w.udpSession != nil: + return w.udpSession.RemoteAddr() + case w.tcp != nil: + return w.tcp.RemoteAddr() + default: + panic("dns: internal error: udpSession and tcp both nil") + } +} + +// TsigStatus implements the ResponseWriter.TsigStatus method. +func (w *response) TsigStatus() error { return w.tsigStatus } + +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. +func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } + +// Hijack implements the ResponseWriter.Hijack method. +func (w *response) Hijack() { w.hijacked = true } + +// Close implements the ResponseWriter.Close method +func (w *response) Close() error { + if w.closed { + return &Error{err: "connection already closed"} + } + w.closed = true + + switch { + case w.udp != nil: + // Can't close the udp conn, as that is actually the listener. + return nil + case w.tcp != nil: + return w.tcp.Close() + default: + panic("dns: internal error: udp and tcp both nil") + } +} + +// ConnectionState() implements the ConnectionStater.ConnectionState() interface. +func (w *response) ConnectionState() *tls.ConnectionState { + type tlsConnectionStater interface { + ConnectionState() tls.ConnectionState + } + if v, ok := w.tcp.(tlsConnectionStater); ok { + t := v.ConnectionState() + return &t + } + return nil +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go new file mode 100644 index 00000000..55cf1c38 --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0.go @@ -0,0 +1,209 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "encoding/binary" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. 
+func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + + rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0} + rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0 + + buf := make([]byte, m.Len()+Len(rr)) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return nil, ErrAlg + } + + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + + signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + if err != nil { + return nil, err + } + + rr.Signature = toBase64(signature) + + buf = append(buf, signature...) + if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen := binary.BigEndian.Uint16(buf[rdoff:]) + rdlen += uint16(len(signature)) + binary.BigEndian.PutUint16(buf[rdoff:], rdlen) + // Adjust additional count + adc := binary.BigEndian.Uint16(buf[10:]) + adc++ + binary.BigEndian.PutUint16(buf[10:], adc) + return buf, nil +} + +// Verify validates the message buf using the key k. +// It's assumed that buf is a valid message from which rr was unpacked. +func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc := binary.BigEndian.Uint16(buf[4:]) + anc := binary.BigEndian.Uint16(buf[6:]) + auc := binary.BigEndian.Uint16(buf[8:]) + adc := binary.BigEndian.Uint16(buf[10:]) + offset := headerSize + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + rdlen := binary.BigEndian.Uint16(buf[offset:]) + offset += 2 + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + incept := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + var 
signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if !strings.EqualFold(signername, k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := new(big.Int).SetBytes(sig[:len(sig)/2]) + s := new(big.Int).SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := new(big.Int).SetBytes(sig[:len(sig)/2]) + s := new(big.Int).SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go new file mode 100644 index 00000000..febcc300 --- /dev/null +++ b/vendor/github.com/miekg/dns/singleinflight.go @@ -0,0 +1,61 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for dns package usage by Miek Gieben. + +package dns + +import "sync" +import "time" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + val *Msg + rtt time.Duration + err error + dups int +} + +// singleflight represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type singleflight struct { + sync.Mutex // protects m + m map[string]*call // lazily initialized + + dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { + g.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.Unlock() + c.wg.Wait() + return c.val, c.rtt, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.Unlock() + + c.val, c.rtt, c.err = fn() + c.wg.Done() + + if !g.dontDeleteForTesting { + g.Lock() + delete(g.m, key) + g.Unlock() + } + + return c.val, c.rtt, c.err, c.dups > 0 +} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go new file mode 100644 index 00000000..89f09f0d --- /dev/null +++ b/vendor/github.com/miekg/dns/smimea.go @@ -0,0 +1,44 @@ +package dns + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" +) + +// Sign creates a SMIMEA record from an SSL certificate. 
+func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeSMIMEA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + return err +} + +// Verify verifies a SMIMEA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *SMIMEA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// SMIMEAName returns the ownername of a SMIMEA resource record as per the +// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 +func SMIMEAName(email, domain string) (string, error) { + hasher := sha256.New() + hasher.Write([]byte(email)) + + // RFC Section 3: "The local-part is hashed using the SHA2-256 + // algorithm with the hash truncated to 28 octets and + // represented in its hexadecimal representation to become the + // left-most label in the prepared domain name" + return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil +} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go new file mode 100644 index 00000000..4e07983b --- /dev/null +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -0,0 +1,44 @@ +package dns + +import ( + "crypto/x509" + "net" + "strconv" +) + +// Sign creates a TLSA record from an SSL certificate. +func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeTLSA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + return err +} + +// Verify verifies a TLSA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *TLSA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// TLSAName returns the ownername of a TLSA resource record as per the +// rules specified in RFC 6698, Section 3. +func TLSAName(name, service, network string) (string, error) { + if !IsFqdn(name) { + return "", ErrFqdn + } + p, err := net.LookupPort(network, service) + if err != nil { + return "", err + } + return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go new file mode 100644 index 00000000..61efa248 --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig.go @@ -0,0 +1,389 @@ +package dns + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "encoding/hex" + "hash" + "strconv" + "strings" + "time" +) + +// HMAC hashing codes. These are transmitted as domain names. +const ( + HmacMD5 = "hmac-md5.sig-alg.reg.int." + HmacSHA1 = "hmac-sha1." + HmacSHA256 = "hmac-sha256." + HmacSHA512 = "hmac-sha512." +) + +// TSIG is the RR the holds the transaction signature of a message. +// See RFC 2845 and RFC 4635. 
+type TSIG struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` + OrigId uint16 + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TSIG has no official presentation format, but this will suffice. + +func (rr *TSIG) String() string { + s := "\n;; TSIG PSEUDOSECTION:\n; " // add another semi-colon to signify TSIG does not have a presentation format + s += rr.Hdr.String() + + " " + rr.Algorithm + + " " + tsigTimeToString(rr.TimeSigned) + + " " + strconv.Itoa(int(rr.Fudge)) + + " " + strconv.Itoa(int(rr.MACSize)) + + " " + strings.ToUpper(rr.MAC) + + " " + strconv.Itoa(int(rr.OrigId)) + + " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +func (rr *TSIG) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on TSIG") +} + +// The following values must be put in wireformat, so that the MAC can be calculated. +// RFC 2845, section 3.4.2. TSIG Variables. +type tsigWireFmt struct { + // From RR_Header + Name string `dns:"domain-name"` + Class uint16 + Ttl uint32 + // Rdata of the TSIG + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + // MACSize, MAC and OrigId excluded + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC +type macWireFmt struct { + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` +} + +// 3.3. Time values used in TSIG calculations +type timerWireFmt struct { + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 +} + +// TsigGenerate fills out the TSIG record attached to the message. +// The message should contain +// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), +// time fudge (defaults to 300 seconds) and the current time +// The TSIG MAC is saved in that Tsig RR. +// When TsigGenerate is called for the first time requestMAC is set to the empty string and +// timersOnly is false. +// If something goes wrong an error is returned, otherwise it is nil. +func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { + if m.IsTsig() == nil { + panic("dns: TSIG not last RR in additional") + } + // If we barf here, the caller is to blame + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return nil, "", err + } + + rr := m.Extra[len(m.Extra)-1].(*TSIG) + m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg + mbuf, err := m.Pack() + if err != nil { + return nil, "", err + } + buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) + + t := new(TSIG) + var h hash.Hash + switch strings.ToLower(rr.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return nil, "", ErrKeyAlg + } + h.Write(buf) + t.MAC = hex.EncodeToString(h.Sum(nil)) + t.MACSize = uint16(len(t.MAC) / 2) // Size is half! 
+ + t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} + t.Fudge = rr.Fudge + t.TimeSigned = rr.TimeSigned + t.Algorithm = rr.Algorithm + t.OrigId = m.Id + + tbuf := make([]byte, Len(t)) + off, err := PackRR(t, tbuf, 0, nil, false) + if err != nil { + return nil, "", err + } + mbuf = append(mbuf, tbuf[:off]...) + // Update the ArCount directly in the buffer. + binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) + + return mbuf, t.MAC, nil +} + +// TsigVerify verifies the TSIG on a message. +// If the signature does not validate err contains the +// error, otherwise it is nil. +func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return err + } + // Strip the TSIG from the incoming msg + stripped, tsig, err := stripTsig(msg) + if err != nil { + return err + } + + msgMAC, err := hex.DecodeString(tsig.MAC) + if err != nil { + return err + } + + buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. + now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } + if uint64(tsig.Fudge) < ti { + return ErrTime + } + + var h hash.Hash + switch strings.ToLower(tsig.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return ErrKeyAlg + } + h.Write(buf) + if !hmac.Equal(h.Sum(nil), msgMAC) { + return ErrSig + } + return nil +} + +// Create a wiredata buffer for the MAC calculation. +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { + var buf []byte + if rr.TimeSigned == 0 { + rr.TimeSigned = uint64(time.Now().Unix()) + } + if rr.Fudge == 0 { + rr.Fudge = 300 // Standard (RFC) default. + } + + // Replace message ID in header with original ID from TSIG + binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId) + + if requestMAC != "" { + m := new(macWireFmt) + m.MACSize = uint16(len(requestMAC) / 2) + m.MAC = requestMAC + buf = make([]byte, len(requestMAC)) // long enough + n, _ := packMacWire(m, buf) + buf = buf[:n] + } + + tsigvar := make([]byte, DefaultMsgSize) + if timersOnly { + tsig := new(timerWireFmt) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + n, _ := packTimerWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } else { + tsig := new(tsigWireFmt) + tsig.Name = strings.ToLower(rr.Hdr.Name) + tsig.Class = ClassANY + tsig.Ttl = rr.Hdr.Ttl + tsig.Algorithm = strings.ToLower(rr.Algorithm) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + tsig.Error = rr.Error + tsig.OtherLen = rr.OtherLen + tsig.OtherData = rr.OtherData + n, _ := packTsigWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } + + if requestMAC != "" { + x := append(buf, msgbuf...) + buf = append(x, tsigvar...) + } else { + buf = append(msgbuf, tsigvar...) + } + return buf +} + +// Strip the TSIG from the raw message. +func stripTsig(msg []byte) ([]byte, *TSIG, error) { + // Copied from msg.go's Unpack() Header, but modified. 
+ var ( + dh Header + err error + ) + off, tsigoff := 0, 0 + + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return nil, nil, err + } + if dh.Arcount == 0 { + return nil, nil, ErrNoSig + } + + // Rcode, see msg.go Unpack() + if int(dh.Bits&0xF) == RcodeNotAuth { + return nil, nil, ErrAuth + } + + for i := 0; i < int(dh.Qdcount); i++ { + _, off, err = unpackQuestion(msg, off) + if err != nil { + return nil, nil, err + } + } + + _, off, err = unpackRRslice(int(dh.Ancount), msg, off) + if err != nil { + return nil, nil, err + } + _, off, err = unpackRRslice(int(dh.Nscount), msg, off) + if err != nil { + return nil, nil, err + } + + rr := new(TSIG) + var extra RR + for i := 0; i < int(dh.Arcount); i++ { + tsigoff = off + extra, off, err = UnpackRR(msg, off) + if err != nil { + return nil, nil, err + } + if extra.Header().Rrtype == TypeTSIG { + rr = extra.(*TSIG) + // Adjust Arcount. + arcount := binary.BigEndian.Uint16(msg[10:]) + binary.BigEndian.PutUint16(msg[10:], arcount-1) + break + } + } + if rr == nil { + return nil, nil, ErrNoSig + } + return msg[:tsigoff], rr, nil +} + +// Translate the TSIG time signed into a date. There is no +// need for RFC1982 calculations as this date is 48 bits. +func tsigTimeToString(t uint64) string { + ti := time.Unix(int64(t), 0).UTC() + return ti.Format("20060102150405") +} + +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go TSIG packing + // RR_Header + off, err := PackDomainName(tw.Name, msg, 0, nil, false) + if err != nil { + return off, err + } + off, err = packUint16(tw.Class, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(tw.Ttl, msg, off) + if err != nil { + return off, err + } + + off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) + if err != nil { + return off, err + } + off, err = packUint48(tw.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + + off, err = packUint16(tw.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(tw.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packMacWire(mw *macWireFmt, msg []byte) (int, error) { + off, err := packUint16(mw.MACSize, msg, 0) + if err != nil { + return off, err + } + off, err = packStringHex(mw.MAC, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { + off, err := packUint48(tw.TimeSigned, msg, 0) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go new file mode 100644 index 00000000..a6048cb1 --- /dev/null +++ b/vendor/github.com/miekg/dns/types.go @@ -0,0 +1,1527 @@ +package dns + +import ( + "bytes" + "fmt" + "net" + "strconv" + "strings" + "time" +) + +type ( + // Type is a DNS type. + Type uint16 + // Class is a DNS class. + Class uint16 + // Name is a DNS domain name. + Name string +) + +// Packet formats + +// Wire constants and supported types. 
+const ( + // valid RR_Header.Rrtype and Question.qtype + + TypeNone uint16 = 0 + TypeA uint16 = 1 + TypeNS uint16 = 2 + TypeMD uint16 = 3 + TypeMF uint16 = 4 + TypeCNAME uint16 = 5 + TypeSOA uint16 = 6 + TypeMB uint16 = 7 + TypeMG uint16 = 8 + TypeMR uint16 = 9 + TypeNULL uint16 = 10 + TypePTR uint16 = 12 + TypeHINFO uint16 = 13 + TypeMINFO uint16 = 14 + TypeMX uint16 = 15 + TypeTXT uint16 = 16 + TypeRP uint16 = 17 + TypeAFSDB uint16 = 18 + TypeX25 uint16 = 19 + TypeISDN uint16 = 20 + TypeRT uint16 = 21 + TypeNSAPPTR uint16 = 23 + TypeSIG uint16 = 24 + TypeKEY uint16 = 25 + TypePX uint16 = 26 + TypeGPOS uint16 = 27 + TypeAAAA uint16 = 28 + TypeLOC uint16 = 29 + TypeNXT uint16 = 30 + TypeEID uint16 = 31 + TypeNIMLOC uint16 = 32 + TypeSRV uint16 = 33 + TypeATMA uint16 = 34 + TypeNAPTR uint16 = 35 + TypeKX uint16 = 36 + TypeCERT uint16 = 37 + TypeDNAME uint16 = 39 + TypeOPT uint16 = 41 // EDNS + TypeAPL uint16 = 42 + TypeDS uint16 = 43 + TypeSSHFP uint16 = 44 + TypeRRSIG uint16 = 46 + TypeNSEC uint16 = 47 + TypeDNSKEY uint16 = 48 + TypeDHCID uint16 = 49 + TypeNSEC3 uint16 = 50 + TypeNSEC3PARAM uint16 = 51 + TypeTLSA uint16 = 52 + TypeSMIMEA uint16 = 53 + TypeHIP uint16 = 55 + TypeNINFO uint16 = 56 + TypeRKEY uint16 = 57 + TypeTALINK uint16 = 58 + TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 + TypeOPENPGPKEY uint16 = 61 + TypeCSYNC uint16 = 62 + TypeSPF uint16 = 99 + TypeUINFO uint16 = 100 + TypeUID uint16 = 101 + TypeGID uint16 = 102 + TypeUNSPEC uint16 = 103 + TypeNID uint16 = 104 + TypeL32 uint16 = 105 + TypeL64 uint16 = 106 + TypeLP uint16 = 107 + TypeEUI48 uint16 = 108 + TypeEUI64 uint16 = 109 + TypeURI uint16 = 256 + TypeCAA uint16 = 257 + TypeAVC uint16 = 258 + + TypeTKEY uint16 = 249 + TypeTSIG uint16 = 250 + + // valid Question.Qtype only + TypeIXFR uint16 = 251 + TypeAXFR uint16 = 252 + TypeMAILB uint16 = 253 + TypeMAILA uint16 = 254 + TypeANY uint16 = 255 + + TypeTA uint16 = 32768 + TypeDLV uint16 = 32769 + TypeReserved uint16 = 65535 + + // valid Question.Qclass + ClassINET = 1 + ClassCSNET = 2 + ClassCHAOS = 3 + ClassHESIOD = 4 + ClassNONE = 254 + ClassANY = 255 + + // Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml + RcodeSuccess = 0 // NoError - No Error [DNS] + RcodeFormatError = 1 // FormErr - Format Error [DNS] + RcodeServerFailure = 2 // ServFail - Server Failure [DNS] + RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS] + RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS] + RcodeRefused = 5 // Refused - Query Refused [DNS] + RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update] + RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update] + RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] + RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] + RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] + RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] + RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] + RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] + RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] + RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] + RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY] + RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY] + RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG] + RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies] + + // Message Opcodes. There is no 3. 
+ OpcodeQuery = 0 + OpcodeIQuery = 1 + OpcodeStatus = 2 + OpcodeNotify = 4 + OpcodeUpdate = 5 +) + +// Header is the wire format for the DNS packet header. +type Header struct { + Id uint16 + Bits uint16 + Qdcount, Ancount, Nscount, Arcount uint16 +} + +const ( + headerSize = 12 + + // Header.Bits + _QR = 1 << 15 // query/response (response=1) + _AA = 1 << 10 // authoritative + _TC = 1 << 9 // truncated + _RD = 1 << 8 // recursion desired + _RA = 1 << 7 // recursion available + _Z = 1 << 6 // Z + _AD = 1 << 5 // authticated data + _CD = 1 << 4 // checking disabled +) + +// Various constants used in the LOC RR, See RFC 1887. +const ( + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + LOC_ALTITUDEBASE = 100000 +) + +// Different Certificate Types, see RFC 4398, Section 2.1 +const ( + CertPKIX = 1 + iota + CertSPKI + CertPGP + CertIPIX + CertISPKI + CertIPGP + CertACPKIX + CertIACPKIX + CertURI = 253 + CertOID = 254 +) + +// CertTypeToString converts the Cert Type to its string representation. +// See RFC 4398 and RFC 6944. +var CertTypeToString = map[uint16]string{ + CertPKIX: "PKIX", + CertSPKI: "SPKI", + CertPGP: "PGP", + CertIPIX: "IPIX", + CertISPKI: "ISPKI", + CertIPGP: "IPGP", + CertACPKIX: "ACPKIX", + CertIACPKIX: "IACPKIX", + CertURI: "URI", + CertOID: "OID", +} + +//go:generate go run types_generate.go + +// Question holds a DNS question. There can be multiple questions in the +// question section of a message. Usually there is just one. +type Question struct { + Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) + Qtype uint16 + Qclass uint16 +} + +func (q *Question) len(off int, compression map[string]struct{}) int { + l := domainNameLen(q.Name, off, compression, true) + l += 2 + 2 + return l +} + +func (q *Question) String() (s string) { + // prefix with ; (as in dig) + s = ";" + sprintName(q.Name) + "\t" + s += Class(q.Qclass).String() + "\t" + s += " " + Type(q.Qtype).String() + return s +} + +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY +// is named "*" there. +type ANY struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *ANY) String() string { return rr.Hdr.String() } + +func (rr *ANY) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on ANY") +} + +// NULL RR. See RFC 1035. +type NULL struct { + Hdr RR_Header + Data string `dns:"any"` +} + +func (rr *NULL) String() string { + // There is no presentation format; prefix string with a comment. + return ";" + rr.Hdr.String() + rr.Data +} + +func (rr *NULL) parse(c *zlexer, origin string) *ParseError { + panic("dns: internal error: parse should never be called on NULL") +} + +// CNAME RR. See RFC 1034. +type CNAME struct { + Hdr RR_Header + Target string `dns:"cdomain-name"` +} + +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } + +// HINFO RR. See RFC 1034. +type HINFO struct { + Hdr RR_Header + Cpu string + Os string +} + +func (rr *HINFO) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) +} + +// MB RR. See RFC 1035. +type MB struct { + Hdr RR_Header + Mb string `dns:"cdomain-name"` +} + +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } + +// MG RR. See RFC 1035. 
+type MG struct { + Hdr RR_Header + Mg string `dns:"cdomain-name"` +} + +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } + +// MINFO RR. See RFC 1035. +type MINFO struct { + Hdr RR_Header + Rmail string `dns:"cdomain-name"` + Email string `dns:"cdomain-name"` +} + +func (rr *MINFO) String() string { + return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) +} + +// MR RR. See RFC 1035. +type MR struct { + Hdr RR_Header + Mr string `dns:"cdomain-name"` +} + +func (rr *MR) String() string { + return rr.Hdr.String() + sprintName(rr.Mr) +} + +// MF RR. See RFC 1035. +type MF struct { + Hdr RR_Header + Mf string `dns:"cdomain-name"` +} + +func (rr *MF) String() string { + return rr.Hdr.String() + sprintName(rr.Mf) +} + +// MD RR. See RFC 1035. +type MD struct { + Hdr RR_Header + Md string `dns:"cdomain-name"` +} + +func (rr *MD) String() string { + return rr.Hdr.String() + sprintName(rr.Md) +} + +// MX RR. See RFC 1035. +type MX struct { + Hdr RR_Header + Preference uint16 + Mx string `dns:"cdomain-name"` +} + +func (rr *MX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) +} + +// AFSDB RR. See RFC 1183. +type AFSDB struct { + Hdr RR_Header + Subtype uint16 + Hostname string `dns:"domain-name"` +} + +func (rr *AFSDB) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) +} + +// X25 RR. See RFC 1183, Section 3.1. +type X25 struct { + Hdr RR_Header + PSDNAddress string +} + +func (rr *X25) String() string { + return rr.Hdr.String() + rr.PSDNAddress +} + +// RT RR. See RFC 1183, Section 3.3. +type RT struct { + Hdr RR_Header + Preference uint16 + Host string `dns:"domain-name"` // RFC 3597 prohibits compressing records not defined in RFC 1035. +} + +func (rr *RT) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) +} + +// NS RR. See RFC 1035. +type NS struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` +} + +func (rr *NS) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) +} + +// PTR RR. See RFC 1035. +type PTR struct { + Hdr RR_Header + Ptr string `dns:"cdomain-name"` +} + +func (rr *PTR) String() string { + return rr.Hdr.String() + sprintName(rr.Ptr) +} + +// RP RR. See RFC 1138, Section 2.2. +type RP struct { + Hdr RR_Header + Mbox string `dns:"domain-name"` + Txt string `dns:"domain-name"` +} + +func (rr *RP) String() string { + return rr.Hdr.String() + sprintName(rr.Mbox) + " " + sprintName(rr.Txt) +} + +// SOA RR. See RFC 1035. +type SOA struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` + Mbox string `dns:"cdomain-name"` + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + Minttl uint32 +} + +func (rr *SOA) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + + " " + strconv.FormatInt(int64(rr.Serial), 10) + + " " + strconv.FormatInt(int64(rr.Refresh), 10) + + " " + strconv.FormatInt(int64(rr.Retry), 10) + + " " + strconv.FormatInt(int64(rr.Expire), 10) + + " " + strconv.FormatInt(int64(rr.Minttl), 10) +} + +// TXT RR. See RFC 1035. +type TXT struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +func sprintName(s string) string { + var dst strings.Builder + + for i := 0; i < len(s); { + if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' 
{ + if dst.Len() != 0 { + dst.WriteString(s[i : i+2]) + } + i += 2 + continue + } + + b, n := nextByte(s, i) + if n == 0 { + i++ + continue + } + if b == '.' { + if dst.Len() != 0 { + dst.WriteByte('.') + } + i += n + continue + } + switch b { + case ' ', '\'', '@', ';', '(', ')', '"', '\\': // additional chars to escape + if dst.Len() == 0 { + dst.Grow(len(s) * 2) + dst.WriteString(s[:i]) + } + dst.WriteByte('\\') + dst.WriteByte(b) + default: + if ' ' <= b && b <= '~' { + if dst.Len() != 0 { + dst.WriteByte(b) + } + } else { + if dst.Len() == 0 { + dst.Grow(len(s) * 2) + dst.WriteString(s[:i]) + } + dst.WriteString(escapeByte(b)) + } + } + i += n + } + if dst.Len() == 0 { + return s + } + return dst.String() +} + +func sprintTxtOctet(s string) string { + var dst strings.Builder + dst.Grow(2 + len(s)) + dst.WriteByte('"') + for i := 0; i < len(s); { + if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' { + dst.WriteString(s[i : i+2]) + i += 2 + continue + } + + b, n := nextByte(s, i) + switch { + case n == 0: + i++ // dangling back slash + case b == '.': + dst.WriteByte('.') + case b < ' ' || b > '~': + dst.WriteString(escapeByte(b)) + default: + dst.WriteByte(b) + } + i += n + } + dst.WriteByte('"') + return dst.String() +} + +func sprintTxt(txt []string) string { + var out strings.Builder + for i, s := range txt { + out.Grow(3 + len(s)) + if i > 0 { + out.WriteString(` "`) + } else { + out.WriteByte('"') + } + for j := 0; j < len(s); { + b, n := nextByte(s, j) + if n == 0 { + break + } + writeTXTStringByte(&out, b) + j += n + } + out.WriteByte('"') + } + return out.String() +} + +func writeTXTStringByte(s *strings.Builder, b byte) { + switch { + case b == '"' || b == '\\': + s.WriteByte('\\') + s.WriteByte(b) + case b < ' ' || b > '~': + s.WriteString(escapeByte(b)) + default: + s.WriteByte(b) + } +} + +const ( + escapedByteSmall = "" + + `\000\001\002\003\004\005\006\007\008\009` + + `\010\011\012\013\014\015\016\017\018\019` + + `\020\021\022\023\024\025\026\027\028\029` + + `\030\031` + escapedByteLarge = `\127\128\129` + + `\130\131\132\133\134\135\136\137\138\139` + + `\140\141\142\143\144\145\146\147\148\149` + + `\150\151\152\153\154\155\156\157\158\159` + + `\160\161\162\163\164\165\166\167\168\169` + + `\170\171\172\173\174\175\176\177\178\179` + + `\180\181\182\183\184\185\186\187\188\189` + + `\190\191\192\193\194\195\196\197\198\199` + + `\200\201\202\203\204\205\206\207\208\209` + + `\210\211\212\213\214\215\216\217\218\219` + + `\220\221\222\223\224\225\226\227\228\229` + + `\230\231\232\233\234\235\236\237\238\239` + + `\240\241\242\243\244\245\246\247\248\249` + + `\250\251\252\253\254\255` +) + +// escapeByte returns the \DDD escaping of b which must +// satisfy b < ' ' || b > '~'. +func escapeByte(b byte) string { + if b < ' ' { + return escapedByteSmall[b*4 : b*4+4] + } + + b -= '~' + 1 + // The cast here is needed as b*4 may overflow byte. + return escapedByteLarge[int(b)*4 : int(b)*4+4] +} + +func nextByte(s string, offset int) (byte, int) { + if offset >= len(s) { + return 0, 0 + } + if s[offset] != '\\' { + // not an escape sequence + return s[offset], 1 + } + switch len(s) - offset { + case 1: // dangling escape + return 0, 0 + case 2, 3: // too short to be \ddd + default: // maybe \ddd + if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) { + return dddStringToByte(s[offset+1:]), 4 + } + } + // not \ddd, just an RFC 1035 "quoted" character + return s[offset+1], 2 +} + +// SPF RR. See RFC 4408, Section 3.1.1. 
+type SPF struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template. +type AVC struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +// SRV RR. See RFC 2782. +type SRV struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Port uint16 + Target string `dns:"domain-name"` +} + +func (rr *SRV) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + strconv.Itoa(int(rr.Weight)) + " " + + strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) +} + +// NAPTR RR. See RFC 2915. +type NAPTR struct { + Hdr RR_Header + Order uint16 + Preference uint16 + Flags string + Service string + Regexp string + Replacement string `dns:"domain-name"` +} + +func (rr *NAPTR) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Order)) + " " + + strconv.Itoa(int(rr.Preference)) + " " + + "\"" + rr.Flags + "\" " + + "\"" + rr.Service + "\" " + + "\"" + rr.Regexp + "\" " + + rr.Replacement +} + +// CERT RR. See RFC 4398. +type CERT struct { + Hdr RR_Header + Type uint16 + KeyTag uint16 + Algorithm uint8 + Certificate string `dns:"base64"` +} + +func (rr *CERT) String() string { + var ( + ok bool + certtype, algorithm string + ) + if certtype, ok = CertTypeToString[rr.Type]; !ok { + certtype = strconv.Itoa(int(rr.Type)) + } + if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { + algorithm = strconv.Itoa(int(rr.Algorithm)) + } + return rr.Hdr.String() + certtype + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + algorithm + + " " + rr.Certificate +} + +// DNAME RR. See RFC 2672. +type DNAME struct { + Hdr RR_Header + Target string `dns:"domain-name"` +} + +func (rr *DNAME) String() string { + return rr.Hdr.String() + sprintName(rr.Target) +} + +// A RR. See RFC 1035. +type A struct { + Hdr RR_Header + A net.IP `dns:"a"` +} + +func (rr *A) String() string { + if rr.A == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.A.String() +} + +// AAAA RR. See RFC 3596. +type AAAA struct { + Hdr RR_Header + AAAA net.IP `dns:"aaaa"` +} + +func (rr *AAAA) String() string { + if rr.AAAA == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.AAAA.String() +} + +// PX RR. See RFC 2163. +type PX struct { + Hdr RR_Header + Preference uint16 + Map822 string `dns:"domain-name"` + Mapx400 string `dns:"domain-name"` +} + +func (rr *PX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) +} + +// GPOS RR. See RFC 1712. +type GPOS struct { + Hdr RR_Header + Longitude string + Latitude string + Altitude string +} + +func (rr *GPOS) String() string { + return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude +} + +// LOC RR. See RFC RFC 1876. 
+type LOC struct { + Hdr RR_Header + Version uint8 + Size uint8 + HorizPre uint8 + VertPre uint8 + Latitude uint32 + Longitude uint32 + Altitude uint32 +} + +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e-- + } + return s +} + +func (rr *LOC) String() string { + s := rr.Hdr.String() + + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR + } else { + ns = "S" + lat = LOC_EQUATOR - lat + } + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lat)/1000, ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lon)/1000, ew) + + var alt = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m " + s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m " + s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m" + + return s +} + +// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0), See RFC 2931. +type SIG struct { + RRSIG +} + +// RRSIG RR. See RFC 4034 and RFC 3755. +type RRSIG struct { + Hdr RR_Header + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + Signature string `dns:"base64"` +} + +func (rr *RRSIG) String() string { + s := rr.Hdr.String() + s += Type(rr.TypeCovered).String() + s += " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Labels)) + + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + + " " + TimeToString(rr.Expiration) + + " " + TimeToString(rr.Inception) + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + sprintName(rr.SignerName) + + " " + rr.Signature + return s +} + +// NSEC RR. See RFC 4034 and RFC 3755. +type NSEC struct { + Hdr RR_Header + NextDomain string `dns:"domain-name"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC) String() string { + s := rr.Hdr.String() + sprintName(rr.NextDomain) + for _, t := range rr.TypeBitMap { + s += " " + Type(t).String() + } + return s +} + +func (rr *NSEC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.NextDomain, off+l, compression, false) + l += typeBitMapLen(rr.TypeBitMap) + return l +} + +// DLV RR. See RFC 4431. +type DLV struct{ DS } + +// CDS RR. See RFC 7344. +type CDS struct{ DS } + +// DS RR. See RFC 4034 and RFC 3658. +type DS struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *DS) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +// KX RR. See RFC 2230. 
+type KX struct { + Hdr RR_Header + Preference uint16 + Exchanger string `dns:"domain-name"` +} + +func (rr *KX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + sprintName(rr.Exchanger) +} + +// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf. +type TA struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *TA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template. +type TALINK struct { + Hdr RR_Header + PreviousName string `dns:"domain-name"` + NextName string `dns:"domain-name"` +} + +func (rr *TALINK) String() string { + return rr.Hdr.String() + + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) +} + +// SSHFP RR. See RFC RFC 4255. +type SSHFP struct { + Hdr RR_Header + Algorithm uint8 + Type uint8 + FingerPrint string `dns:"hex"` +} + +func (rr *SSHFP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Type)) + + " " + strings.ToUpper(rr.FingerPrint) +} + +// KEY RR. See RFC RFC 2535. +type KEY struct { + DNSKEY +} + +// CDNSKEY RR. See RFC 7344. +type CDNSKEY struct { + DNSKEY +} + +// DNSKEY RR. See RFC 4034 and RFC 3755. +type DNSKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *DNSKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. +type RKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *RKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +// NSAPPTR RR. See RFC 1348. +type NSAPPTR struct { + Hdr RR_Header + Ptr string `dns:"domain-name"` +} + +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } + +// NSEC3 RR. See RFC 5155. +type NSEC3 struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` + HashLength uint8 + NextDomain string `dns:"size-base32:HashLength"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC3) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + + " " + rr.NextDomain + for _, t := range rr.TypeBitMap { + s += " " + Type(t).String() + } + return s +} + +func (rr *NSEC3) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + l += typeBitMapLen(rr.TypeBitMap) + return l +} + +// NSEC3PARAM RR. See RFC 5155. 
+type NSEC3PARAM struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` +} + +func (rr *NSEC3PARAM) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + return s +} + +// TKEY RR. See RFC 2930. +type TKEY struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + Inception uint32 + Expiration uint32 + Mode uint16 + Error uint16 + KeySize uint16 + Key string `dns:"size-hex:KeySize"` + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TKEY has no official presentation format, but this will suffice. +func (rr *TKEY) String() string { + s := ";" + rr.Hdr.String() + + " " + rr.Algorithm + + " " + TimeToString(rr.Inception) + + " " + TimeToString(rr.Expiration) + + " " + strconv.Itoa(int(rr.Mode)) + + " " + strconv.Itoa(int(rr.Error)) + + " " + strconv.Itoa(int(rr.KeySize)) + + " " + rr.Key + + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +// RFC3597 represents an unknown/generic RR. See RFC 3597. +type RFC3597 struct { + Hdr RR_Header + Rdata string `dns:"hex"` +} + +func (rr *RFC3597) String() string { + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata + return s +} + +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + +// URI RR. See RFC 7553. +type URI struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Target string `dns:"octet"` +} + +func (rr *URI) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) +} + +// DHCID RR. See RFC 4701. +type DHCID struct { + Hdr RR_Header + Digest string `dns:"base64"` +} + +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } + +// TLSA RR. See RFC 6698. +type TLSA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *TLSA) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + " " + rr.Certificate +} + +// SMIMEA RR. See RFC 8162. +type SMIMEA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *SMIMEA) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + // Every Nth char needs a space on this output. If we output + // this as one giant line, we can't read it can in because in some cases + // the cert length overflows scan.maxTok (2048). + sx := splitN(rr.Certificate, 1024) // conservative value here + s += " " + strings.Join(sx, " ") + return s +} + +// HIP RR. See RFC 8005. 
+type HIP struct { + Hdr RR_Header + HitLength uint8 + PublicKeyAlgorithm uint8 + PublicKeyLength uint16 + Hit string `dns:"size-hex:HitLength"` + PublicKey string `dns:"size-base64:PublicKeyLength"` + RendezvousServers []string `dns:"domain-name"` +} + +func (rr *HIP) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + + " " + rr.Hit + + " " + rr.PublicKey + for _, d := range rr.RendezvousServers { + s += " " + sprintName(d) + } + return s +} + +// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template. +type NINFO struct { + Hdr RR_Header + ZSData []string `dns:"txt"` +} + +func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } + +// NID RR. See RFC RFC 6742. +type NID struct { + Hdr RR_Header + Preference uint16 + NodeID uint64 +} + +func (rr *NID) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16x", rr.NodeID) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +// L32 RR, See RFC 6742. +type L32 struct { + Hdr RR_Header + Preference uint16 + Locator32 net.IP `dns:"a"` +} + +func (rr *L32) String() string { + if rr.Locator32 == nil { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + } + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + rr.Locator32.String() +} + +// L64 RR, See RFC 6742. +type L64 struct { + Hdr RR_Header + Preference uint16 + Locator64 uint64 +} + +func (rr *L64) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16X", rr.Locator64) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +// LP RR. See RFC 6742. +type LP struct { + Hdr RR_Header + Preference uint16 + Fqdn string `dns:"domain-name"` +} + +func (rr *LP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) +} + +// EUI48 RR. See RFC 7043. +type EUI48 struct { + Hdr RR_Header + Address uint64 `dns:"uint48"` +} + +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } + +// EUI64 RR. See RFC 7043. +type EUI64 struct { + Hdr RR_Header + Address uint64 +} + +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } + +// CAA RR. See RFC 6844. +type CAA struct { + Hdr RR_Header + Flag uint8 + Tag string + Value string `dns:"octet"` +} + +func (rr *CAA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) +} + +// UID RR. Deprecated, IANA-Reserved. +type UID struct { + Hdr RR_Header + Uid uint32 +} + +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } + +// GID RR. Deprecated, IANA-Reserved. +type GID struct { + Hdr RR_Header + Gid uint32 +} + +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } + +// UINFO RR. Deprecated, IANA-Reserved. +type UINFO struct { + Hdr RR_Header + Uinfo string +} + +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } + +// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. +type EID struct { + Hdr RR_Header + Endpoint string `dns:"hex"` +} + +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } + +// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. 
+type NIMLOC struct { + Hdr RR_Header + Locator string `dns:"hex"` +} + +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } + +// OPENPGPKEY RR. See RFC 7929. +type OPENPGPKEY struct { + Hdr RR_Header + PublicKey string `dns:"base64"` +} + +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } + +// CSYNC RR. See RFC 7477. +type CSYNC struct { + Hdr RR_Header + Serial uint32 + Flags uint16 + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *CSYNC) String() string { + s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags)) + + for _, t := range rr.TypeBitMap { + s += " " + Type(t).String() + } + return s +} + +func (rr *CSYNC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 4 + 2 + l += typeBitMapLen(rr.TypeBitMap) + return l +} + +// APL RR. See RFC 3123. +type APL struct { + Hdr RR_Header + Prefixes []APLPrefix `dns:"apl"` +} + +// APLPrefix is an address prefix hold by an APL record. +type APLPrefix struct { + Negation bool + Network net.IPNet +} + +// String returns presentation form of the APL record. +func (rr *APL) String() string { + var sb strings.Builder + sb.WriteString(rr.Hdr.String()) + for i, p := range rr.Prefixes { + if i > 0 { + sb.WriteByte(' ') + } + sb.WriteString(p.str()) + } + return sb.String() +} + +// str returns presentation form of the APL prefix. +func (p *APLPrefix) str() string { + var sb strings.Builder + if p.Negation { + sb.WriteByte('!') + } + + switch len(p.Network.IP) { + case net.IPv4len: + sb.WriteByte('1') + case net.IPv6len: + sb.WriteByte('2') + } + + sb.WriteByte(':') + + switch len(p.Network.IP) { + case net.IPv4len: + sb.WriteString(p.Network.IP.String()) + case net.IPv6len: + // add prefix for IPv4-mapped IPv6 + if v4 := p.Network.IP.To4(); v4 != nil { + sb.WriteString("::ffff:") + } + sb.WriteString(p.Network.IP.String()) + } + + sb.WriteByte('/') + + prefix, _ := p.Network.Mask.Size() + sb.WriteString(strconv.Itoa(prefix)) + + return sb.String() +} + +// equals reports whether two APL prefixes are identical. +func (a *APLPrefix) equals(b *APLPrefix) bool { + return a.Negation == b.Negation && + bytes.Equal(a.Network.IP, b.Network.IP) && + bytes.Equal(a.Network.Mask, b.Network.Mask) +} + +// copy returns a copy of the APL prefix. +func (p *APLPrefix) copy() APLPrefix { + return APLPrefix{ + Negation: p.Negation, + Network: copyNet(p.Network), + } +} + +// len returns size of the prefix in wire format. +func (p *APLPrefix) len() int { + // 4-byte header and the network address prefix (see Section 4 of RFC 3123) + prefix, _ := p.Network.Mask.Size() + return 4 + (prefix+7)/8 +} + +// TimeToString translates the RRSIG's incep. and expir. times to the +// string representation used when printing the record. +// It takes serial arithmetic (RFC 1982) into account. +func TimeToString(t uint32) string { + mod := (int64(t)-time.Now().Unix())/year68 - 1 + if mod < 0 { + mod = 0 + } + ti := time.Unix(int64(t)-mod*year68, 0).UTC() + return ti.Format("20060102150405") +} + +// StringToTime translates the RRSIG's incep. and expir. times from +// string values like "20110403154150" to an 32 bit integer. +// It takes serial arithmetic (RFC 1982) into account. 
+func StringToTime(s string) (uint32, error) { + t, err := time.Parse("20060102150405", s) + if err != nil { + return 0, err + } + mod := t.Unix()/year68 - 1 + if mod < 0 { + mod = 0 + } + return uint32(t.Unix() - mod*year68), nil +} + +// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. +func saltToString(s string) string { + if len(s) == 0 { + return "-" + } + return strings.ToUpper(s) +} + +func euiToString(eui uint64, bits int) (hex string) { + switch bits { + case 64: + hex = fmt.Sprintf("%16.16x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] + case 48: + hex = fmt.Sprintf("%12.12x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + } + return +} + +// copyIP returns a copy of ip. +func copyIP(ip net.IP) net.IP { + p := make(net.IP, len(ip)) + copy(p, ip) + return p +} + +// copyNet returns a copy of a subnet. +func copyNet(n net.IPNet) net.IPNet { + m := make(net.IPMask, len(n.Mask)) + copy(m, n.Mask) + + return net.IPNet{ + IP: copyIP(n.IP), + Mask: m, + } +} + +// SplitN splits a string into N sized string chunks. +// This might become an exported function once. +func splitN(s string, n int) []string { + if len(s) < n { + return []string{s} + } + sx := []string{} + p, i := 0, n + for { + if i <= len(s) { + sx = append(sx, s[p:i]) + } else { + sx = append(sx, s[p:]) + break + + } + p, i = p+n, i+n + } + + return sx +} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go new file mode 100644 index 00000000..a4826ee2 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp.go @@ -0,0 +1,102 @@ +// +build !windows + +package dns + +import ( + "net" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// This is the required size of the OOB buffer to pass to ReadMsgUDP. +var udpOOBSize = func() int { + // We can't know whether we'll get an IPv4 control message or an + // IPv6 control message ahead of time. To get around this, we size + // the buffer equal to the largest of the two. + + oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface) + oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface) + + if len(oob4) > len(oob6) { + return len(oob4) + } + + return len(oob6) +}() + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, udpOOBSize) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + oob := correctSource(session.context) + n, _, err := conn.WriteMsgUDP(b, oob, session.raddr) + return n, err +} + +func setUDPSocketOptions(conn *net.UDPConn) error { + // Try setting the flags for both families and ignore the errors unless they + // both error. 
+ err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true) + err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true) + if err6 != nil && err4 != nil { + return err4 + } + return nil +} + +// parseDstFromOOB takes oob data and returns the destination IP. +func parseDstFromOOB(oob []byte) net.IP { + // Start with IPv6 and then fallback to IPv4 + // TODO(fastest963): Figure out a way to prefer one or the other. Looking at + // the lvl of the header for a 0 or 41 isn't cross-platform. + cm6 := new(ipv6.ControlMessage) + if cm6.Parse(oob) == nil && cm6.Dst != nil { + return cm6.Dst + } + cm4 := new(ipv4.ControlMessage) + if cm4.Parse(oob) == nil && cm4.Dst != nil { + return cm4.Dst + } + return nil +} + +// correctSource takes oob data and returns new oob data with the Src equal to the Dst +func correctSource(oob []byte) []byte { + dst := parseDstFromOOB(oob) + if dst == nil { + return nil + } + // If the dst is definitely an IPv6, then use ipv6's ControlMessage to + // respond otherwise use ipv4's because ipv6's marshal ignores ipv4 + // addresses. + if dst.To4() == nil { + cm := new(ipv6.ControlMessage) + cm.Src = dst + oob = cm.Marshal() + } else { + cm := new(ipv4.ControlMessage) + cm.Src = dst + oob = cm.Marshal() + } + return oob +} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go new file mode 100644 index 00000000..e7dd8ca3 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -0,0 +1,35 @@ +// +build windows + +package dns + +import "net" + +// SessionUDP holds the remote address +type SessionUDP struct { + raddr *net.UDPAddr +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + n, raddr, err := conn.ReadFrom(b) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr.(*net.UDPAddr)}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + return conn.WriteTo(b, session.raddr) +} + +// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods +// use the standard method in udp.go for these. +func setUDPSocketOptions(*net.UDPConn) error { return nil } +func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go new file mode 100644 index 00000000..69dd3865 --- /dev/null +++ b/vendor/github.com/miekg/dns/update.go @@ -0,0 +1,110 @@ +package dns + +// NameUsed sets the RRs in the prereq section to +// "Name is in use" RRs. RFC 2136 section 2.4.4. +func (u *Msg) NameUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// NameNotUsed sets the RRs in the prereq section to +// "Name is in not use" RRs. RFC 2136 section 2.4.5. 
+func (u *Msg) NameNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) + } +} + +// Used sets the RRs in the prereq section to +// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. +func (u *Msg) Used(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Answer = append(u.Answer, r) + } +} + +// RRsetUsed sets the RRs in the prereq section to +// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. +func (u *Msg) RRsetUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + h := r.Header() + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}}) + } +} + +// RRsetNotUsed sets the RRs in the prereq section to +// "RRset does not exist" RRs. RFC 2136 section 2.4.3. +func (u *Msg) RRsetNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + h := r.Header() + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}}) + } +} + +// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. +func (u *Msg) Insert(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Ns = append(u.Ns, r) + } +} + +// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. +func (u *Msg) RemoveRRset(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + h := r.Header() + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}}) + } +} + +// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 +func (u *Msg) RemoveName(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 +func (u *Msg) Remove(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + h := r.Header() + h.Class = ClassNONE + h.Ttl = 0 + u.Ns = append(u.Ns, r) + } +} diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go new file mode 100644 index 00000000..cab46b4f --- /dev/null +++ b/vendor/github.com/miekg/dns/version.go @@ -0,0 +1,15 @@ +package dns + +import "fmt" + +// Version is current version of this library. +var Version = V{1, 1, 27} + +// V holds the version of this library. 
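// The update helpers above map directly onto the prerequisite and update
// sections of an RFC 2136 dynamic update message. A minimal, illustrative
// sketch of putting them together (zone, record and server address are
// placeholders; SetUpdate and Client.Exchange come from elsewhere in this
// package):
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("www.example.org. 300 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}

	m := new(dns.Msg)
	m.SetUpdate("example.org.") // zone the update applies to
	m.NameNotUsed([]dns.RR{rr}) // prerequisite: the name must not exist yet
	m.Insert([]dns.RR{rr})      // update: add the A record

	c := new(dns.Client)
	r, _, err := c.Exchange(m, "ns1.example.org:53")
	if err != nil {
		log.Fatal(err)
	}
	if r.Rcode != dns.RcodeSuccess {
		log.Fatalf("update refused: %s", dns.RcodeToString[r.Rcode])
	}
}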
+type V struct { + Major, Minor, Patch int +} + +func (v V) String() string { + return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) +} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go new file mode 100644 index 00000000..43970e64 --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr.go @@ -0,0 +1,266 @@ +package dns + +import ( + "fmt" + "time" +) + +// Envelope is used when doing a zone transfer with a remote server. +type Envelope struct { + RR []RR // The set of RRs in the answer section of the xfr reply message. + Error error // If something went wrong, this contains the error. +} + +// A Transfer defines parameters that are used during a zone transfer. +type Transfer struct { + *Conn + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds + TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + tsigTimersOnly bool +} + +// Think we need to away to stop the transfer + +// In performs an incoming transfer with the server in a. +// If you would like to set the source IP, or some other attribute +// of a Dialer for a Transfer, you can do so by specifying the attributes +// in the Transfer.Conn: +// +// d := net.Dialer{LocalAddr: transfer_source} +// con, err := d.Dial("tcp", master) +// dnscon := &dns.Conn{Conn:con} +// transfer = &dns.Transfer{Conn: dnscon} +// channel, err := transfer.In(message, master) +// +func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { + switch q.Question[0].Qtype { + case TypeAXFR, TypeIXFR: + default: + return nil, &Error{"unsupported question type"} + } + + timeout := dnsTimeout + if t.DialTimeout != 0 { + timeout = t.DialTimeout + } + + if t.Conn == nil { + t.Conn, err = DialTimeout("tcp", a, timeout) + if err != nil { + return nil, err + } + } + + if err := t.WriteMsg(q); err != nil { + return nil, err + } + + env = make(chan *Envelope) + switch q.Question[0].Qtype { + case TypeAXFR: + go t.inAxfr(q, env) + case TypeIXFR: + go t.inIxfr(q, env) + } + + return env, nil +} + +func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.Conn.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if q.Id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + first = !first + // only one answer that is SOA, receive more + if len(in.Answer) == 1 { + t.tsigTimersOnly = true + c <- &Envelope{in.Answer, nil} + continue + } + } + + if !first { + t.tsigTimersOnly = true // Subsequent envelopes use this. 
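// Transfer.In above returns a channel of Envelopes that is closed when the
// transfer finishes, so an incoming zone transfer reduces to ranging over the
// channel. A minimal, illustrative sketch (zone and server names are
// placeholders; SetAxfr comes from elsewhere in this package):
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.") // the question type selects AXFR or IXFR

	tr := new(dns.Transfer)
	env, err := tr.In(m, "ns1.example.org:53")
	if err != nil {
		log.Fatal(err)
	}
	for e := range env {
		if e.Error != nil {
			log.Fatal(e.Error)
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
}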
+ if isSOALast(in) { + c <- &Envelope{in.Answer, nil} + return + } + c <- &Envelope{in.Answer, nil} + } + } +} + +func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { + var serial uint32 // The first serial seen is the current server serial + axfr := true + n := 0 + qser := q.Ns[0].(*SOA).Serial + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if q.Id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if n == 0 { + // Check if the returned answer is ok + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + // This serial is important + serial = in.Answer[0].(*SOA).Serial + // Check if there are no changes in zone + if qser >= serial { + c <- &Envelope{in.Answer, nil} + return + } + } + // Now we need to check each message for SOA records, to see what we need to do + t.tsigTimersOnly = true + for _, rr := range in.Answer { + if v, ok := rr.(*SOA); ok { + if v.Serial == serial { + n++ + // quit if it's a full axfr or the the servers' SOA is repeated the third time + if axfr && n == 2 || n == 3 { + c <- &Envelope{in.Answer, nil} + return + } + } else if axfr { + // it's an ixfr + axfr = false + } + } + } + c <- &Envelope{in.Answer, nil} + } +} + +// Out performs an outgoing transfer with the client connecting in w. +// Basic use pattern: +// +// ch := make(chan *dns.Envelope) +// tr := new(dns.Transfer) +// var wg sync.WaitGroup +// go func() { +// tr.Out(w, r, ch) +// wg.Done() +// }() +// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} +// close(ch) +// wg.Wait() // wait until everything is written out +// w.Close() // close connection +// +// The server is responsible for sending the correct sequence of RRs through the channel ch. +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if tsig := q.IsTsig(); tsig != nil && w.TsigStatus() == nil { + r.SetTsig(tsig.Hdr.Name, tsig.Algorithm, tsig.Fudge, time.Now().Unix()) + } + if err := w.WriteMsg(r); err != nil { + return err + } + w.TsigTimersOnly(true) + } + return nil +} + +// ReadMsg reads a message from the transfer connection t. +func (t *Transfer) ReadMsg() (*Msg, error) { + m := new(Msg) + p := make([]byte, MaxMsgSize) + n, err := t.Read(p) + if err != nil && n == 0 { + return nil, err + } + p = p[:n] + if err := m.Unpack(p); err != nil { + return nil, err + } + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + t.tsigRequestMAC = ts.MAC + } + return m, err +} + +// WriteMsg writes a message through the transfer connection t. 
+func (t *Transfer) WriteMsg(m *Msg) (err error) { + var out []byte + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return ErrSecret + } + out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + _, err = t.Write(out) + return err +} + +func isSOAFirst(in *Msg) bool { + return len(in.Answer) > 0 && + in.Answer[0].Header().Rrtype == TypeSOA +} + +func isSOALast(in *Msg) bool { + return len(in.Answer) > 0 && + in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA +} + +const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go new file mode 100644 index 00000000..a58a8c0c --- /dev/null +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -0,0 +1,1157 @@ +// Code generated by "go run duplicate_generate.go"; DO NOT EDIT. + +package dns + +// isDuplicate() functions + +func (r1 *A) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*A) + if !ok { + return false + } + _ = r2 + if !r1.A.Equal(r2.A) { + return false + } + return true +} + +func (r1 *AAAA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AAAA) + if !ok { + return false + } + _ = r2 + if !r1.AAAA.Equal(r2.AAAA) { + return false + } + return true +} + +func (r1 *AFSDB) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AFSDB) + if !ok { + return false + } + _ = r2 + if r1.Subtype != r2.Subtype { + return false + } + if !isDuplicateName(r1.Hostname, r2.Hostname) { + return false + } + return true +} + +func (r1 *ANY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*ANY) + if !ok { + return false + } + _ = r2 + return true +} + +func (r1 *APL) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*APL) + if !ok { + return false + } + _ = r2 + if len(r1.Prefixes) != len(r2.Prefixes) { + return false + } + for i := 0; i < len(r1.Prefixes); i++ { + if !r1.Prefixes[i].equals(&r2.Prefixes[i]) { + return false + } + } + return true +} + +func (r1 *AVC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AVC) + if !ok { + return false + } + _ = r2 + if len(r1.Txt) != len(r2.Txt) { + return false + } + for i := 0; i < len(r1.Txt); i++ { + if r1.Txt[i] != r2.Txt[i] { + return false + } + } + return true +} + +func (r1 *CAA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CAA) + if !ok { + return false + } + _ = r2 + if r1.Flag != r2.Flag { + return false + } + if r1.Tag != r2.Tag { + return false + } + if r1.Value != r2.Value { + return false + } + return true +} + +func (r1 *CERT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CERT) + if !ok { + return false + } + _ = r2 + if r1.Type != r2.Type { + return false + } + if r1.KeyTag != r2.KeyTag { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.Certificate != r2.Certificate { + return false + } + return true +} + +func (r1 *CNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CNAME) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Target, r2.Target) { + return false + } + return true +} + +func (r1 *CSYNC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CSYNC) + if !ok { + return false + } + _ = r2 + if r1.Serial != r2.Serial { + return false + } + if r1.Flags != r2.Flags { + return false + } + if len(r1.TypeBitMap) != len(r2.TypeBitMap) { + return false + } + for i := 0; i < len(r1.TypeBitMap); i++ { + if r1.TypeBitMap[i] != r2.TypeBitMap[i] { + return false + } + } + return true +} + +func (r1 *DHCID) isDuplicate(_r2 RR) bool { + r2, ok := 
_r2.(*DHCID) + if !ok { + return false + } + _ = r2 + if r1.Digest != r2.Digest { + return false + } + return true +} + +func (r1 *DNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DNAME) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Target, r2.Target) { + return false + } + return true +} + +func (r1 *DNSKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DNSKEY) + if !ok { + return false + } + _ = r2 + if r1.Flags != r2.Flags { + return false + } + if r1.Protocol != r2.Protocol { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + +func (r1 *DS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DS) + if !ok { + return false + } + _ = r2 + if r1.KeyTag != r2.KeyTag { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.DigestType != r2.DigestType { + return false + } + if r1.Digest != r2.Digest { + return false + } + return true +} + +func (r1 *EID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EID) + if !ok { + return false + } + _ = r2 + if r1.Endpoint != r2.Endpoint { + return false + } + return true +} + +func (r1 *EUI48) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EUI48) + if !ok { + return false + } + _ = r2 + if r1.Address != r2.Address { + return false + } + return true +} + +func (r1 *EUI64) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EUI64) + if !ok { + return false + } + _ = r2 + if r1.Address != r2.Address { + return false + } + return true +} + +func (r1 *GID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*GID) + if !ok { + return false + } + _ = r2 + if r1.Gid != r2.Gid { + return false + } + return true +} + +func (r1 *GPOS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*GPOS) + if !ok { + return false + } + _ = r2 + if r1.Longitude != r2.Longitude { + return false + } + if r1.Latitude != r2.Latitude { + return false + } + if r1.Altitude != r2.Altitude { + return false + } + return true +} + +func (r1 *HINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*HINFO) + if !ok { + return false + } + _ = r2 + if r1.Cpu != r2.Cpu { + return false + } + if r1.Os != r2.Os { + return false + } + return true +} + +func (r1 *HIP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*HIP) + if !ok { + return false + } + _ = r2 + if r1.HitLength != r2.HitLength { + return false + } + if r1.PublicKeyAlgorithm != r2.PublicKeyAlgorithm { + return false + } + if r1.PublicKeyLength != r2.PublicKeyLength { + return false + } + if r1.Hit != r2.Hit { + return false + } + if r1.PublicKey != r2.PublicKey { + return false + } + if len(r1.RendezvousServers) != len(r2.RendezvousServers) { + return false + } + for i := 0; i < len(r1.RendezvousServers); i++ { + if !isDuplicateName(r1.RendezvousServers[i], r2.RendezvousServers[i]) { + return false + } + } + return true +} + +func (r1 *KX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*KX) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Exchanger, r2.Exchanger) { + return false + } + return true +} + +func (r1 *L32) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*L32) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !r1.Locator32.Equal(r2.Locator32) { + return false + } + return true +} + +func (r1 *L64) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*L64) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if r1.Locator64 != r2.Locator64 { + return false + } + return true 
+} + +func (r1 *LOC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*LOC) + if !ok { + return false + } + _ = r2 + if r1.Version != r2.Version { + return false + } + if r1.Size != r2.Size { + return false + } + if r1.HorizPre != r2.HorizPre { + return false + } + if r1.VertPre != r2.VertPre { + return false + } + if r1.Latitude != r2.Latitude { + return false + } + if r1.Longitude != r2.Longitude { + return false + } + if r1.Altitude != r2.Altitude { + return false + } + return true +} + +func (r1 *LP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*LP) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Fqdn, r2.Fqdn) { + return false + } + return true +} + +func (r1 *MB) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MB) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mb, r2.Mb) { + return false + } + return true +} + +func (r1 *MD) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MD) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Md, r2.Md) { + return false + } + return true +} + +func (r1 *MF) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MF) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mf, r2.Mf) { + return false + } + return true +} + +func (r1 *MG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MG) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mg, r2.Mg) { + return false + } + return true +} + +func (r1 *MINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MINFO) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Rmail, r2.Rmail) { + return false + } + if !isDuplicateName(r1.Email, r2.Email) { + return false + } + return true +} + +func (r1 *MR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MR) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mr, r2.Mr) { + return false + } + return true +} + +func (r1 *MX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MX) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Mx, r2.Mx) { + return false + } + return true +} + +func (r1 *NAPTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NAPTR) + if !ok { + return false + } + _ = r2 + if r1.Order != r2.Order { + return false + } + if r1.Preference != r2.Preference { + return false + } + if r1.Flags != r2.Flags { + return false + } + if r1.Service != r2.Service { + return false + } + if r1.Regexp != r2.Regexp { + return false + } + if !isDuplicateName(r1.Replacement, r2.Replacement) { + return false + } + return true +} + +func (r1 *NID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NID) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if r1.NodeID != r2.NodeID { + return false + } + return true +} + +func (r1 *NIMLOC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NIMLOC) + if !ok { + return false + } + _ = r2 + if r1.Locator != r2.Locator { + return false + } + return true +} + +func (r1 *NINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NINFO) + if !ok { + return false + } + _ = r2 + if len(r1.ZSData) != len(r2.ZSData) { + return false + } + for i := 0; i < len(r1.ZSData); i++ { + if r1.ZSData[i] != r2.ZSData[i] { + return false + } + } + return true +} + +func (r1 *NS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NS) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Ns, r2.Ns) { + return false + } + return true +} + +func (r1 *NSAPPTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSAPPTR) + if !ok { + return false + } + _ = 
r2 + if !isDuplicateName(r1.Ptr, r2.Ptr) { + return false + } + return true +} + +func (r1 *NSEC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.NextDomain, r2.NextDomain) { + return false + } + if len(r1.TypeBitMap) != len(r2.TypeBitMap) { + return false + } + for i := 0; i < len(r1.TypeBitMap); i++ { + if r1.TypeBitMap[i] != r2.TypeBitMap[i] { + return false + } + } + return true +} + +func (r1 *NSEC3) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC3) + if !ok { + return false + } + _ = r2 + if r1.Hash != r2.Hash { + return false + } + if r1.Flags != r2.Flags { + return false + } + if r1.Iterations != r2.Iterations { + return false + } + if r1.SaltLength != r2.SaltLength { + return false + } + if r1.Salt != r2.Salt { + return false + } + if r1.HashLength != r2.HashLength { + return false + } + if r1.NextDomain != r2.NextDomain { + return false + } + if len(r1.TypeBitMap) != len(r2.TypeBitMap) { + return false + } + for i := 0; i < len(r1.TypeBitMap); i++ { + if r1.TypeBitMap[i] != r2.TypeBitMap[i] { + return false + } + } + return true +} + +func (r1 *NSEC3PARAM) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC3PARAM) + if !ok { + return false + } + _ = r2 + if r1.Hash != r2.Hash { + return false + } + if r1.Flags != r2.Flags { + return false + } + if r1.Iterations != r2.Iterations { + return false + } + if r1.SaltLength != r2.SaltLength { + return false + } + if r1.Salt != r2.Salt { + return false + } + return true +} + +func (r1 *NULL) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NULL) + if !ok { + return false + } + _ = r2 + if r1.Data != r2.Data { + return false + } + return true +} + +func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*OPENPGPKEY) + if !ok { + return false + } + _ = r2 + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + +func (r1 *PTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*PTR) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Ptr, r2.Ptr) { + return false + } + return true +} + +func (r1 *PX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*PX) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Map822, r2.Map822) { + return false + } + if !isDuplicateName(r1.Mapx400, r2.Mapx400) { + return false + } + return true +} + +func (r1 *RFC3597) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RFC3597) + if !ok { + return false + } + _ = r2 + if r1.Rdata != r2.Rdata { + return false + } + return true +} + +func (r1 *RKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RKEY) + if !ok { + return false + } + _ = r2 + if r1.Flags != r2.Flags { + return false + } + if r1.Protocol != r2.Protocol { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + +func (r1 *RP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RP) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Mbox, r2.Mbox) { + return false + } + if !isDuplicateName(r1.Txt, r2.Txt) { + return false + } + return true +} + +func (r1 *RRSIG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RRSIG) + if !ok { + return false + } + _ = r2 + if r1.TypeCovered != r2.TypeCovered { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.Labels != r2.Labels { + return false + } + if r1.OrigTtl != r2.OrigTtl { + return false + } + if r1.Expiration != r2.Expiration { + return false + } + if r1.Inception != r2.Inception { + 
return false + } + if r1.KeyTag != r2.KeyTag { + return false + } + if !isDuplicateName(r1.SignerName, r2.SignerName) { + return false + } + if r1.Signature != r2.Signature { + return false + } + return true +} + +func (r1 *RT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RT) + if !ok { + return false + } + _ = r2 + if r1.Preference != r2.Preference { + return false + } + if !isDuplicateName(r1.Host, r2.Host) { + return false + } + return true +} + +func (r1 *SMIMEA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SMIMEA) + if !ok { + return false + } + _ = r2 + if r1.Usage != r2.Usage { + return false + } + if r1.Selector != r2.Selector { + return false + } + if r1.MatchingType != r2.MatchingType { + return false + } + if r1.Certificate != r2.Certificate { + return false + } + return true +} + +func (r1 *SOA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SOA) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Ns, r2.Ns) { + return false + } + if !isDuplicateName(r1.Mbox, r2.Mbox) { + return false + } + if r1.Serial != r2.Serial { + return false + } + if r1.Refresh != r2.Refresh { + return false + } + if r1.Retry != r2.Retry { + return false + } + if r1.Expire != r2.Expire { + return false + } + if r1.Minttl != r2.Minttl { + return false + } + return true +} + +func (r1 *SPF) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SPF) + if !ok { + return false + } + _ = r2 + if len(r1.Txt) != len(r2.Txt) { + return false + } + for i := 0; i < len(r1.Txt); i++ { + if r1.Txt[i] != r2.Txt[i] { + return false + } + } + return true +} + +func (r1 *SRV) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SRV) + if !ok { + return false + } + _ = r2 + if r1.Priority != r2.Priority { + return false + } + if r1.Weight != r2.Weight { + return false + } + if r1.Port != r2.Port { + return false + } + if !isDuplicateName(r1.Target, r2.Target) { + return false + } + return true +} + +func (r1 *SSHFP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SSHFP) + if !ok { + return false + } + _ = r2 + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.Type != r2.Type { + return false + } + if r1.FingerPrint != r2.FingerPrint { + return false + } + return true +} + +func (r1 *TA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TA) + if !ok { + return false + } + _ = r2 + if r1.KeyTag != r2.KeyTag { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.DigestType != r2.DigestType { + return false + } + if r1.Digest != r2.Digest { + return false + } + return true +} + +func (r1 *TALINK) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TALINK) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.PreviousName, r2.PreviousName) { + return false + } + if !isDuplicateName(r1.NextName, r2.NextName) { + return false + } + return true +} + +func (r1 *TKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TKEY) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Algorithm, r2.Algorithm) { + return false + } + if r1.Inception != r2.Inception { + return false + } + if r1.Expiration != r2.Expiration { + return false + } + if r1.Mode != r2.Mode { + return false + } + if r1.Error != r2.Error { + return false + } + if r1.KeySize != r2.KeySize { + return false + } + if r1.Key != r2.Key { + return false + } + if r1.OtherLen != r2.OtherLen { + return false + } + if r1.OtherData != r2.OtherData { + return false + } + return true +} + +func (r1 *TLSA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TLSA) + if !ok { + return false + } + _ = r2 + if r1.Usage != r2.Usage { + return false + } + if 
r1.Selector != r2.Selector { + return false + } + if r1.MatchingType != r2.MatchingType { + return false + } + if r1.Certificate != r2.Certificate { + return false + } + return true +} + +func (r1 *TSIG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TSIG) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.Algorithm, r2.Algorithm) { + return false + } + if r1.TimeSigned != r2.TimeSigned { + return false + } + if r1.Fudge != r2.Fudge { + return false + } + if r1.MACSize != r2.MACSize { + return false + } + if r1.MAC != r2.MAC { + return false + } + if r1.OrigId != r2.OrigId { + return false + } + if r1.Error != r2.Error { + return false + } + if r1.OtherLen != r2.OtherLen { + return false + } + if r1.OtherData != r2.OtherData { + return false + } + return true +} + +func (r1 *TXT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TXT) + if !ok { + return false + } + _ = r2 + if len(r1.Txt) != len(r2.Txt) { + return false + } + for i := 0; i < len(r1.Txt); i++ { + if r1.Txt[i] != r2.Txt[i] { + return false + } + } + return true +} + +func (r1 *UID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*UID) + if !ok { + return false + } + _ = r2 + if r1.Uid != r2.Uid { + return false + } + return true +} + +func (r1 *UINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*UINFO) + if !ok { + return false + } + _ = r2 + if r1.Uinfo != r2.Uinfo { + return false + } + return true +} + +func (r1 *URI) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*URI) + if !ok { + return false + } + _ = r2 + if r1.Priority != r2.Priority { + return false + } + if r1.Weight != r2.Weight { + return false + } + if r1.Target != r2.Target { + return false + } + return true +} + +func (r1 *X25) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*X25) + if !ok { + return false + } + _ = r2 + if r1.PSDNAddress != r2.PSDNAddress { + return false + } + return true +} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go new file mode 100644 index 00000000..02a5dfa4 --- /dev/null +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -0,0 +1,2741 @@ +// Code generated by "go run msg_generate.go"; DO NOT EDIT. 
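// The per-type isDuplicate methods above compare only the rdata of two
// records; the exported entry point in this library, dns.IsDuplicate, also
// checks the owner name, class and type while ignoring the TTL. A minimal,
// illustrative sketch:
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	a1, _ := dns.NewRR("www.example.org. 300 IN A 192.0.2.1")
	a2, _ := dns.NewRR("www.example.org. 600 IN A 192.0.2.1") // only the TTL differs
	a3, _ := dns.NewRR("www.example.org. 300 IN A 192.0.2.2") // the rdata differs

	fmt.Println(dns.IsDuplicate(a1, a2)) // true
	fmt.Println(dns.IsDuplicate(a1, a3)) // false
}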
+ +package dns + +// pack*() functions + +func (rr *A) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDataA(rr.A, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AAAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDataAAAA(rr.AAAA, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Subtype, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Hostname, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + return off, nil +} + +func (rr *APL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDataApl(rr.Prefixes, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Flag, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Tag, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Value, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CDNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CDS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CERT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Certificate, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CSYNC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err 
error) { + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DHCID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringBase64(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DLV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringHex(rr.Endpoint, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EUI48) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint48(rr.Address, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EUI64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint64(rr.Address, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *GID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint32(rr.Gid, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *GPOS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Altitude, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *HINFO) pack(msg []byte, off int, compression compressionMap, 
compress bool) (off1 int, err error) { + off, err = packString(rr.Cpu, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Os, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.HitLength, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.PublicKeyLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Hit, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *KX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Exchanger, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *L32) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDataA(rr.Locator32, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *L64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.Locator64, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *LOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Version, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Size, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.HorizPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.VertPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Altitude, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *LP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Fqdn, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = 
packDomainName(rr.Mb, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MD) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Md, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mf, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mg, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Rmail, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Email, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mr, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Mx, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NAPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Order, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Service, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Regexp, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Replacement, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.NodeID, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NIMLOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringHex(rr.Locator, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.ZSData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSAPPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = 
packDomainName(rr.Ptr, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.NextDomain, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC3) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + off, err = packUint8(rr.HashLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase32(rr.NextDomain, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. 
empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + return off, nil +} + +func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringAny(rr.Data, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *OPT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDataOpt(rr.Option, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *PTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Map822, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Mapx400, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringHex(rr.Rdata, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mbox, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Txt, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RRSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RT) pack(msg []byte, off int, compression 
compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Host, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SMIMEA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SOA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Refresh, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Retry, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expire, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Minttl, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SPF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SRV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Port, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.FingerPrint, msg, off) + if err != nil { + 
return off, err + } + return off, nil +} + +func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TALINK) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.PreviousName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDomainName(rr.NextName, msg, off, compression, false) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Mode, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeySize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Key, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TLSA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint48(rr.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Fudge, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.MACSize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.MAC, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OrigId, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *UID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, 
err error) { + off, err = packUint32(rr.Uid, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *UINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.Uinfo, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *URI) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Target, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *X25) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.PSDNAddress, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +// unpack*() functions + +func (rr *A) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.A, off, err = unpackDataA(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AAAA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.AAAA, off, err = unpackDataAAAA(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Subtype, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Hostname, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + return off, nil +} + +func (rr *APL) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Prefixes, off, err = unpackDataApl(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CAA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flag, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Tag, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Value, off, err = unpackStringOctet(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CDNSKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CDS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = 
unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CERT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Type, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CNAME) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *CSYNC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DHCID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DLV) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DNAME) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DNSKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, 
rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *DS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EUI48) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint48(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *EUI64) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint64(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *GID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Gid, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *GPOS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Longitude, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Latitude, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Altitude, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *HINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Cpu, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Os, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.HitLength, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKeyLength, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) + if err != nil { + return off, err + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) + if err != nil { + return off, err + } + rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == 
len(msg) { + return off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *KX) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Exchanger, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *L32) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Locator32, off, err = unpackDataA(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *L64) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Locator64, off, err = unpackUint64(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *LOC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Version, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Size, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.HorizPre, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.VertPre, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Latitude, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Longitude, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Altitude, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *LP) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Fqdn, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MB) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mb, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MD) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Md, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MF) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mf, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MG) unpack(msg []byte, off int) (off1 int, err error) { 
+ rdStart := off + _ = rdStart + + rr.Mg, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Rmail, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Email, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MR) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mr, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *MX) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Mx, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NAPTR) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Order, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Flags, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Service, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Regexp, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Replacement, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.NodeID, off, err = unpackUint64(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NIMLOC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.ZSData, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSAPPTR) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC3) 
unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return off, err + } + rr.HashLength, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) + if err != nil { + return off, err + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NSEC3PARAM) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Data, off, err = unpackStringAny(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *OPENPGPKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *OPT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Option, off, err = unpackDataOpt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *PTR) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Map822, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Mapx400, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RKEY) 
unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RP) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Txt, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RRSIG) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *RT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Host, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SIG) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, 
nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SMIMEA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SOA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Refresh, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Retry, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Expire, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Minttl, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SPF) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SRV) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Port, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Type, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = 
unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TALINK) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.PreviousName, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.NextName, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Mode, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.KeySize, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize)) + if err != nil { + return off, err + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TLSA) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TSIG) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.TimeSigned, off, err = unpackUint48(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Fudge, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.MACSize, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) + if err != nil { + return 
off, err + } + rr.OrigId, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *TXT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *UID) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Uid, off, err = unpackUint32(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *UINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Uinfo, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *URI) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Target, off, err = unpackStringOctet(msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *X25) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.PSDNAddress, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go new file mode 100644 index 00000000..1cbd6d3f --- /dev/null +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -0,0 +1,898 @@ +// Code generated by "go run types_generate.go"; DO NOT EDIT. + +package dns + +import ( + "encoding/base64" + "net" +) + +// TypeToRR is a map of constructors for each RR type. 
+var TypeToRR = map[uint16]func() RR{ + TypeA: func() RR { return new(A) }, + TypeAAAA: func() RR { return new(AAAA) }, + TypeAFSDB: func() RR { return new(AFSDB) }, + TypeANY: func() RR { return new(ANY) }, + TypeAPL: func() RR { return new(APL) }, + TypeAVC: func() RR { return new(AVC) }, + TypeCAA: func() RR { return new(CAA) }, + TypeCDNSKEY: func() RR { return new(CDNSKEY) }, + TypeCDS: func() RR { return new(CDS) }, + TypeCERT: func() RR { return new(CERT) }, + TypeCNAME: func() RR { return new(CNAME) }, + TypeCSYNC: func() RR { return new(CSYNC) }, + TypeDHCID: func() RR { return new(DHCID) }, + TypeDLV: func() RR { return new(DLV) }, + TypeDNAME: func() RR { return new(DNAME) }, + TypeDNSKEY: func() RR { return new(DNSKEY) }, + TypeDS: func() RR { return new(DS) }, + TypeEID: func() RR { return new(EID) }, + TypeEUI48: func() RR { return new(EUI48) }, + TypeEUI64: func() RR { return new(EUI64) }, + TypeGID: func() RR { return new(GID) }, + TypeGPOS: func() RR { return new(GPOS) }, + TypeHINFO: func() RR { return new(HINFO) }, + TypeHIP: func() RR { return new(HIP) }, + TypeKEY: func() RR { return new(KEY) }, + TypeKX: func() RR { return new(KX) }, + TypeL32: func() RR { return new(L32) }, + TypeL64: func() RR { return new(L64) }, + TypeLOC: func() RR { return new(LOC) }, + TypeLP: func() RR { return new(LP) }, + TypeMB: func() RR { return new(MB) }, + TypeMD: func() RR { return new(MD) }, + TypeMF: func() RR { return new(MF) }, + TypeMG: func() RR { return new(MG) }, + TypeMINFO: func() RR { return new(MINFO) }, + TypeMR: func() RR { return new(MR) }, + TypeMX: func() RR { return new(MX) }, + TypeNAPTR: func() RR { return new(NAPTR) }, + TypeNID: func() RR { return new(NID) }, + TypeNIMLOC: func() RR { return new(NIMLOC) }, + TypeNINFO: func() RR { return new(NINFO) }, + TypeNS: func() RR { return new(NS) }, + TypeNSAPPTR: func() RR { return new(NSAPPTR) }, + TypeNSEC: func() RR { return new(NSEC) }, + TypeNSEC3: func() RR { return new(NSEC3) }, + TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeNULL: func() RR { return new(NULL) }, + TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, + TypeOPT: func() RR { return new(OPT) }, + TypePTR: func() RR { return new(PTR) }, + TypePX: func() RR { return new(PX) }, + TypeRKEY: func() RR { return new(RKEY) }, + TypeRP: func() RR { return new(RP) }, + TypeRRSIG: func() RR { return new(RRSIG) }, + TypeRT: func() RR { return new(RT) }, + TypeSIG: func() RR { return new(SIG) }, + TypeSMIMEA: func() RR { return new(SMIMEA) }, + TypeSOA: func() RR { return new(SOA) }, + TypeSPF: func() RR { return new(SPF) }, + TypeSRV: func() RR { return new(SRV) }, + TypeSSHFP: func() RR { return new(SSHFP) }, + TypeTA: func() RR { return new(TA) }, + TypeTALINK: func() RR { return new(TALINK) }, + TypeTKEY: func() RR { return new(TKEY) }, + TypeTLSA: func() RR { return new(TLSA) }, + TypeTSIG: func() RR { return new(TSIG) }, + TypeTXT: func() RR { return new(TXT) }, + TypeUID: func() RR { return new(UID) }, + TypeUINFO: func() RR { return new(UINFO) }, + TypeURI: func() RR { return new(URI) }, + TypeX25: func() RR { return new(X25) }, +} + +// TypeToString is a map of strings for each RR type. 
+var TypeToString = map[uint16]string{ + TypeA: "A", + TypeAAAA: "AAAA", + TypeAFSDB: "AFSDB", + TypeANY: "ANY", + TypeAPL: "APL", + TypeATMA: "ATMA", + TypeAVC: "AVC", + TypeAXFR: "AXFR", + TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", + TypeCDS: "CDS", + TypeCERT: "CERT", + TypeCNAME: "CNAME", + TypeCSYNC: "CSYNC", + TypeDHCID: "DHCID", + TypeDLV: "DLV", + TypeDNAME: "DNAME", + TypeDNSKEY: "DNSKEY", + TypeDS: "DS", + TypeEID: "EID", + TypeEUI48: "EUI48", + TypeEUI64: "EUI64", + TypeGID: "GID", + TypeGPOS: "GPOS", + TypeHINFO: "HINFO", + TypeHIP: "HIP", + TypeISDN: "ISDN", + TypeIXFR: "IXFR", + TypeKEY: "KEY", + TypeKX: "KX", + TypeL32: "L32", + TypeL64: "L64", + TypeLOC: "LOC", + TypeLP: "LP", + TypeMAILA: "MAILA", + TypeMAILB: "MAILB", + TypeMB: "MB", + TypeMD: "MD", + TypeMF: "MF", + TypeMG: "MG", + TypeMINFO: "MINFO", + TypeMR: "MR", + TypeMX: "MX", + TypeNAPTR: "NAPTR", + TypeNID: "NID", + TypeNIMLOC: "NIMLOC", + TypeNINFO: "NINFO", + TypeNS: "NS", + TypeNSEC: "NSEC", + TypeNSEC3: "NSEC3", + TypeNSEC3PARAM: "NSEC3PARAM", + TypeNULL: "NULL", + TypeNXT: "NXT", + TypeNone: "None", + TypeOPENPGPKEY: "OPENPGPKEY", + TypeOPT: "OPT", + TypePTR: "PTR", + TypePX: "PX", + TypeRKEY: "RKEY", + TypeRP: "RP", + TypeRRSIG: "RRSIG", + TypeRT: "RT", + TypeReserved: "Reserved", + TypeSIG: "SIG", + TypeSMIMEA: "SMIMEA", + TypeSOA: "SOA", + TypeSPF: "SPF", + TypeSRV: "SRV", + TypeSSHFP: "SSHFP", + TypeTA: "TA", + TypeTALINK: "TALINK", + TypeTKEY: "TKEY", + TypeTLSA: "TLSA", + TypeTSIG: "TSIG", + TypeTXT: "TXT", + TypeUID: "UID", + TypeUINFO: "UINFO", + TypeUNSPEC: "UNSPEC", + TypeURI: "URI", + TypeX25: "X25", + TypeNSAPPTR: "NSAP-PTR", +} + +func (rr *A) Header() *RR_Header { return &rr.Hdr } +func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } +func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *ANY) Header() *RR_Header { return &rr.Hdr } +func (rr *APL) Header() *RR_Header { return &rr.Hdr } +func (rr *AVC) Header() *RR_Header { return &rr.Hdr } +func (rr *CAA) Header() *RR_Header { return &rr.Hdr } +func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *CDS) Header() *RR_Header { return &rr.Hdr } +func (rr *CERT) Header() *RR_Header { return &rr.Hdr } +func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr } +func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } +func (rr *DLV) Header() *RR_Header { return &rr.Hdr } +func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *DS) Header() *RR_Header { return &rr.Hdr } +func (rr *EID) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } +func (rr *GID) Header() *RR_Header { return &rr.Hdr } +func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } +func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr *KEY) Header() *RR_Header { return &rr.Hdr } +func (rr *KX) Header() *RR_Header { return &rr.Hdr } +func (rr *L32) Header() *RR_Header { return &rr.Hdr } +func (rr *L64) Header() *RR_Header { return &rr.Hdr } +func (rr *LOC) Header() *RR_Header { return &rr.Hdr } +func (rr *LP) Header() *RR_Header { return &rr.Hdr } +func (rr *MB) Header() *RR_Header { return &rr.Hdr } +func (rr *MD) Header() *RR_Header { return &rr.Hdr } +func (rr *MF) Header() *RR_Header { return &rr.Hdr } +func (rr *MG) Header() *RR_Header { return &rr.Hdr } +func (rr 
*MINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *MR) Header() *RR_Header { return &rr.Hdr } +func (rr *MX) Header() *RR_Header { return &rr.Hdr } +func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NID) Header() *RR_Header { return &rr.Hdr } +func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } +func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *NS) Header() *RR_Header { return &rr.Hdr } +func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *NULL) Header() *RR_Header { return &rr.Hdr } +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *OPT) Header() *RR_Header { return &rr.Hdr } +func (rr *PTR) Header() *RR_Header { return &rr.Hdr } +func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } +func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *RP) Header() *RR_Header { return &rr.Hdr } +func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *RT) Header() *RR_Header { return &rr.Hdr } +func (rr *SIG) Header() *RR_Header { return &rr.Hdr } +func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } +func (rr *SOA) Header() *RR_Header { return &rr.Hdr } +func (rr *SPF) Header() *RR_Header { return &rr.Hdr } +func (rr *SRV) Header() *RR_Header { return &rr.Hdr } +func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *TA) Header() *RR_Header { return &rr.Hdr } +func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } +func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } +func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *TXT) Header() *RR_Header { return &rr.Hdr } +func (rr *UID) Header() *RR_Header { return &rr.Hdr } +func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *URI) Header() *RR_Header { return &rr.Hdr } +func (rr *X25) Header() *RR_Header { return &rr.Hdr } + +// len() functions +func (rr *A) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + if len(rr.A) != 0 { + l += net.IPv4len + } + return l +} +func (rr *AAAA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + if len(rr.AAAA) != 0 { + l += net.IPv6len + } + return l +} +func (rr *AFSDB) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Subtype + l += domainNameLen(rr.Hostname, off+l, compression, false) + return l +} +func (rr *ANY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + return l +} +func (rr *APL) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Prefixes { + l += x.len() + } + return l +} +func (rr *AVC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *CAA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Flag + l += len(rr.Tag) + 1 + l += len(rr.Value) + return l +} +func (rr *CERT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Type + l += 2 // KeyTag + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) + return l +} +func (rr 
*CNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Target, off+l, compression, true) + return l +} +func (rr *DHCID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += base64.StdEncoding.DecodedLen(len(rr.Digest)) + return l +} +func (rr *DNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Target, off+l, compression, false) + return l +} +func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *DS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest) / 2 + return l +} +func (rr *EID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Endpoint) / 2 + return l +} +func (rr *EUI48) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 6 // Address + return l +} +func (rr *EUI64) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 8 // Address + return l +} +func (rr *GID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 4 // Gid + return l +} +func (rr *GPOS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Longitude) + 1 + l += len(rr.Latitude) + 1 + l += len(rr.Altitude) + 1 + return l +} +func (rr *HINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Cpu) + 1 + l += len(rr.Os) + 1 + return l +} +func (rr *HIP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // HitLength + l++ // PublicKeyAlgorithm + l += 2 // PublicKeyLength + l += len(rr.Hit) / 2 + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + for _, x := range rr.RendezvousServers { + l += domainNameLen(x, off+l, compression, false) + } + return l +} +func (rr *KX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Exchanger, off+l, compression, false) + return l +} +func (rr *L32) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + if len(rr.Locator32) != 0 { + l += net.IPv4len + } + return l +} +func (rr *L64) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += 8 // Locator64 + return l +} +func (rr *LOC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Version + l++ // Size + l++ // HorizPre + l++ // VertPre + l += 4 // Latitude + l += 4 // Longitude + l += 4 // Altitude + return l +} +func (rr *LP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Fqdn, off+l, compression, false) + return l +} +func (rr *MB) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mb, off+l, compression, true) + return l +} +func (rr *MD) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 
domainNameLen(rr.Md, off+l, compression, true) + return l +} +func (rr *MF) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mf, off+l, compression, true) + return l +} +func (rr *MG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mg, off+l, compression, true) + return l +} +func (rr *MINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Rmail, off+l, compression, true) + l += domainNameLen(rr.Email, off+l, compression, true) + return l +} +func (rr *MR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mr, off+l, compression, true) + return l +} +func (rr *MX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Mx, off+l, compression, true) + return l +} +func (rr *NAPTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Order + l += 2 // Preference + l += len(rr.Flags) + 1 + l += len(rr.Service) + 1 + l += len(rr.Regexp) + 1 + l += domainNameLen(rr.Replacement, off+l, compression, false) + return l +} +func (rr *NID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += 8 // NodeID + return l +} +func (rr *NIMLOC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Locator) / 2 + return l +} +func (rr *NINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.ZSData { + l += len(x) + 1 + } + return l +} +func (rr *NS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ns, off+l, compression, true) + return l +} +func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ptr, off+l, compression, false) + return l +} +func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Hash + l++ // Flags + l += 2 // Iterations + l++ // SaltLength + l += len(rr.Salt) / 2 + return l +} +func (rr *NULL) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Data) + return l +} +func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *PTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ptr, off+l, compression, true) + return l +} +func (rr *PX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Map822, off+l, compression, false) + l += domainNameLen(rr.Mapx400, off+l, compression, false) + return l +} +func (rr *RFC3597) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Rdata) / 2 + return l +} +func (rr *RKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *RP) len(off int, compression map[string]struct{}) int { + 
l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mbox, off+l, compression, false) + l += domainNameLen(rr.Txt, off+l, compression, false) + return l +} +func (rr *RRSIG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // TypeCovered + l++ // Algorithm + l++ // Labels + l += 4 // OrigTtl + l += 4 // Expiration + l += 4 // Inception + l += 2 // KeyTag + l += domainNameLen(rr.SignerName, off+l, compression, false) + l += base64.StdEncoding.DecodedLen(len(rr.Signature)) + return l +} +func (rr *RT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Preference + l += domainNameLen(rr.Host, off+l, compression, false) + return l +} +func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate) / 2 + return l +} +func (rr *SOA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ns, off+l, compression, true) + l += domainNameLen(rr.Mbox, off+l, compression, true) + l += 4 // Serial + l += 4 // Refresh + l += 4 // Retry + l += 4 // Expire + l += 4 // Minttl + return l +} +func (rr *SPF) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *SRV) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Priority + l += 2 // Weight + l += 2 // Port + l += domainNameLen(rr.Target, off+l, compression, false) + return l +} +func (rr *SSHFP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Algorithm + l++ // Type + l += len(rr.FingerPrint) / 2 + return l +} +func (rr *TA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest) / 2 + return l +} +func (rr *TALINK) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.PreviousName, off+l, compression, false) + l += domainNameLen(rr.NextName, off+l, compression, false) + return l +} +func (rr *TKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Algorithm, off+l, compression, false) + l += 4 // Inception + l += 4 // Expiration + l += 2 // Mode + l += 2 // Error + l += 2 // KeySize + l += len(rr.Key) / 2 + l += 2 // OtherLen + l += len(rr.OtherData) / 2 + return l +} +func (rr *TLSA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate) / 2 + return l +} +func (rr *TSIG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Algorithm, off+l, compression, false) + l += 6 // TimeSigned + l += 2 // Fudge + l += 2 // MACSize + l += len(rr.MAC) / 2 + l += 2 // OrigId + l += 2 // Error + l += 2 // OtherLen + l += len(rr.OtherData) / 2 + return l +} +func (rr *TXT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *UID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 4 // Uid + return l +} +func (rr *UINFO) len(off int, 
compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Uinfo) + 1 + return l +} +func (rr *URI) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Priority + l += 2 // Weight + l += len(rr.Target) + return l +} +func (rr *X25) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.PSDNAddress) + 1 + return l +} + +// copy() functions +func (rr *A) copy() RR { + return &A{rr.Hdr, copyIP(rr.A)} +} +func (rr *AAAA) copy() RR { + return &AAAA{rr.Hdr, copyIP(rr.AAAA)} +} +func (rr *AFSDB) copy() RR { + return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} +} +func (rr *ANY) copy() RR { + return &ANY{rr.Hdr} +} +func (rr *APL) copy() RR { + Prefixes := make([]APLPrefix, len(rr.Prefixes)) + for i := range rr.Prefixes { + Prefixes[i] = rr.Prefixes[i].copy() + } + return &APL{rr.Hdr, Prefixes} +} +func (rr *AVC) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &AVC{rr.Hdr, Txt} +} +func (rr *CAA) copy() RR { + return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} +} +func (rr *CERT) copy() RR { + return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} +} +func (rr *CNAME) copy() RR { + return &CNAME{rr.Hdr, rr.Target} +} +func (rr *CSYNC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} +} +func (rr *DHCID) copy() RR { + return &DHCID{rr.Hdr, rr.Digest} +} +func (rr *DNAME) copy() RR { + return &DNAME{rr.Hdr, rr.Target} +} +func (rr *DNSKEY) copy() RR { + return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *DS) copy() RR { + return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *EID) copy() RR { + return &EID{rr.Hdr, rr.Endpoint} +} +func (rr *EUI48) copy() RR { + return &EUI48{rr.Hdr, rr.Address} +} +func (rr *EUI64) copy() RR { + return &EUI64{rr.Hdr, rr.Address} +} +func (rr *GID) copy() RR { + return &GID{rr.Hdr, rr.Gid} +} +func (rr *GPOS) copy() RR { + return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude} +} +func (rr *HINFO) copy() RR { + return &HINFO{rr.Hdr, rr.Cpu, rr.Os} +} +func (rr *HIP) copy() RR { + RendezvousServers := make([]string, len(rr.RendezvousServers)) + copy(RendezvousServers, rr.RendezvousServers) + return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} +} +func (rr *KX) copy() RR { + return &KX{rr.Hdr, rr.Preference, rr.Exchanger} +} +func (rr *L32) copy() RR { + return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} +} +func (rr *L64) copy() RR { + return &L64{rr.Hdr, rr.Preference, rr.Locator64} +} +func (rr *LOC) copy() RR { + return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} +} +func (rr *LP) copy() RR { + return &LP{rr.Hdr, rr.Preference, rr.Fqdn} +} +func (rr *MB) copy() RR { + return &MB{rr.Hdr, rr.Mb} +} +func (rr *MD) copy() RR { + return &MD{rr.Hdr, rr.Md} +} +func (rr *MF) copy() RR { + return &MF{rr.Hdr, rr.Mf} +} +func (rr *MG) copy() RR { + return &MG{rr.Hdr, rr.Mg} +} +func (rr *MINFO) copy() RR { + return &MINFO{rr.Hdr, rr.Rmail, rr.Email} +} +func (rr *MR) copy() RR { + return &MR{rr.Hdr, rr.Mr} +} +func (rr *MX) copy() RR { + return &MX{rr.Hdr, rr.Preference, rr.Mx} +} +func (rr *NAPTR) copy() RR { + return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} +} +func 
(rr *NID) copy() RR { + return &NID{rr.Hdr, rr.Preference, rr.NodeID} +} +func (rr *NIMLOC) copy() RR { + return &NIMLOC{rr.Hdr, rr.Locator} +} +func (rr *NINFO) copy() RR { + ZSData := make([]string, len(rr.ZSData)) + copy(ZSData, rr.ZSData) + return &NINFO{rr.Hdr, ZSData} +} +func (rr *NS) copy() RR { + return &NS{rr.Hdr, rr.Ns} +} +func (rr *NSAPPTR) copy() RR { + return &NSAPPTR{rr.Hdr, rr.Ptr} +} +func (rr *NSEC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3PARAM) copy() RR { + return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} +} +func (rr *NULL) copy() RR { + return &NULL{rr.Hdr, rr.Data} +} +func (rr *OPENPGPKEY) copy() RR { + return &OPENPGPKEY{rr.Hdr, rr.PublicKey} +} +func (rr *OPT) copy() RR { + Option := make([]EDNS0, len(rr.Option)) + for i, e := range rr.Option { + Option[i] = e.copy() + } + return &OPT{rr.Hdr, Option} +} +func (rr *PTR) copy() RR { + return &PTR{rr.Hdr, rr.Ptr} +} +func (rr *PX) copy() RR { + return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} +} +func (rr *RFC3597) copy() RR { + return &RFC3597{rr.Hdr, rr.Rdata} +} +func (rr *RKEY) copy() RR { + return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *RP) copy() RR { + return &RP{rr.Hdr, rr.Mbox, rr.Txt} +} +func (rr *RRSIG) copy() RR { + return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} +} +func (rr *RT) copy() RR { + return &RT{rr.Hdr, rr.Preference, rr.Host} +} +func (rr *SMIMEA) copy() RR { + return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *SOA) copy() RR { + return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} +} +func (rr *SPF) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &SPF{rr.Hdr, Txt} +} +func (rr *SRV) copy() RR { + return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} +} +func (rr *SSHFP) copy() RR { + return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} +} +func (rr *TA) copy() RR { + return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *TALINK) copy() RR { + return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} +} +func (rr *TKEY) copy() RR { + return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} +} +func (rr *TLSA) copy() RR { + return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *TSIG) copy() RR { + return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} +} +func (rr *TXT) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &TXT{rr.Hdr, Txt} +} +func (rr *UID) copy() RR { + return &UID{rr.Hdr, rr.Uid} +} +func (rr *UINFO) copy() RR { + return &UINFO{rr.Hdr, rr.Uinfo} +} +func (rr *URI) copy() RR { + return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} +} +func (rr *X25) copy() RR { + return &X25{rr.Hdr, rr.PSDNAddress} +} diff --git a/vendor/github.com/pborman/uuid/.travis.yml 
b/vendor/github.com/pborman/uuid/.travis.yml deleted file mode 100644 index 3deb4a12..00000000 --- a/vendor/github.com/pborman/uuid/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - "1.9" - - "1.10" - - "1.11" - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f..00000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS deleted file mode 100644 index b382a04e..00000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTORS +++ /dev/null @@ -1 +0,0 @@ -Paul Borman diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md deleted file mode 100644 index 810ad40d..00000000 --- a/vendor/github.com/pborman/uuid/README.md +++ /dev/null @@ -1,15 +0,0 @@ -This project was automatically exported from code.google.com/p/go-uuid - -# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. - -This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). - -###### Install -`go get github.com/pborman/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) - -Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go deleted file mode 100644 index 50a0f2d0..00000000 --- a/vendor/github.com/pborman/uuid/dce.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) UUID { - uuid := NewUUID() - if uuid != nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. 
-// -// NewDCEPerson(Person, uint32(os.Getuid())) -func NewDCEPerson() UUID { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCEGroup(Group, uint32(os.Getgid())) -func NewDCEGroup() UUID { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID or false. -func (uuid UUID) Domain() (Domain, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return Domain(uuid[9]), true -} - -// Id returns the id for a Version 2 UUID or false. -func (uuid UUID) Id() (uint32, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return binary.BigEndian.Uint32(uuid[0:4]), true -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go deleted file mode 100644 index 727d7616..00000000 --- a/vendor/github.com/pborman/uuid/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The uuid package generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// This package is a partial wrapper around the github.com/google/uuid package. -// This package represents a UUID as []byte while github.com/google/uuid -// represents a UUID as [16]byte. -package uuid diff --git a/vendor/github.com/pborman/uuid/go.mod b/vendor/github.com/pborman/uuid/go.mod deleted file mode 100644 index 099fc7de..00000000 --- a/vendor/github.com/pborman/uuid/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pborman/uuid - -require github.com/google/uuid v1.0.0 diff --git a/vendor/github.com/pborman/uuid/go.sum b/vendor/github.com/pborman/uuid/go.sum deleted file mode 100644 index db2574a9..00000000 --- a/vendor/github.com/pborman/uuid/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go deleted file mode 100644 index a0420c1e..00000000 --- a/vendor/github.com/pborman/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known Name Space IDs and UUIDs -var ( - NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") - NIL = Parse("00000000-0000-0000-0000-000000000000") -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. 
-func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space) - h.Write([]byte(data)) - s := h.Sum(nil) - uuid := make([]byte, 16) - copy(uuid, s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go deleted file mode 100644 index 35b89352..00000000 --- a/vendor/github.com/pborman/uuid/marshal.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "errors" - "fmt" - - guuid "github.com/google/uuid" -) - -// MarshalText implements encoding.TextMarshaler. -func (u UUID) MarshalText() ([]byte, error) { - if len(u) != 16 { - return nil, nil - } - var js [36]byte - encodeHex(js[:], u) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *UUID) UnmarshalText(data []byte) error { - if len(data) == 0 { - return nil - } - id := Parse(string(data)) - if id == nil { - return errors.New("invalid UUID") - } - *u = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u UUID) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (u *UUID) UnmarshalBinary(data []byte) error { - if len(data) == 0 { - return nil - } - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - var id [16]byte - copy(id[:], data) - *u = id[:] - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (u Array) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], u[:]) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (u *Array) UnmarshalText(data []byte) error { - id, err := guuid.ParseBytes(data) - if err != nil { - return err - } - *u = Array(id) - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (u Array) MarshalBinary() ([]byte, error) { - return u[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (u *Array) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(u[:], data) - return nil -} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go deleted file mode 100644 index e524e010..00000000 --- a/vendor/github.com/pborman/uuid/node.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. 
-func NodeInterface() string { - return guuid.NodeInterface() -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - return guuid.SetNodeInterface(name) -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - return guuid.NodeID() -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - return guuid.SetNodeID(id) -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - if len(uuid) != 16 { - return nil - } - node := make([]byte, 6) - copy(node, uuid[10:]) - return node -} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go deleted file mode 100644 index 929c3847..00000000 --- a/vendor/github.com/pborman/uuid/sql.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "errors" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src.(type) { - case string: - // if an empty UUID comes from a table, we return a null UUID - if src.(string) == "" { - return nil - } - - // see uuid.Parse for required string format - parsed := Parse(src.(string)) - - if parsed == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = parsed - case []byte: - b := src.([]byte) - - // if an empty UUID comes from a table, we return a null UUID - if len(b) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(b) == 16 { - parsed := make([]byte, 16) - copy(parsed, b) - *uuid = UUID(parsed) - } else { - u := Parse(string(b)) - - if u == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = u - } - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go deleted file mode 100644 index 5c0960d8..00000000 --- a/vendor/github.com/pborman/uuid/time.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "encoding/binary" - - guuid "github.com/google/uuid" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time = guuid.Time - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { return guuid.GetTime() } - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence a new random -// clock sequence is generated the first time a clock sequence is requested by -// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated -// for -func ClockSequence() int { return guuid.ClockSequence() } - -// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. It returns false if uuid is not valid. The time is only well defined -// for version 1 and 2 UUIDs. -func (uuid UUID) Time() (Time, bool) { - if len(uuid) != 16 { - return 0, false - } - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time), true -} - -// ClockSequence returns the clock sequence encoded in uuid. It returns false -// if uuid is not valid. The clock sequence is only well defined for version 1 -// and 2 UUIDs. -func (uuid UUID) ClockSequence() (int, bool) { - if len(uuid) != 16 { - return 0, false - } - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true -} diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go deleted file mode 100644 index 255b5e24..00000000 --- a/vendor/github.com/pborman/uuid/util.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -// xvalues returns the value of a byte as a hexadecimal digit or 255. 
-var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts the the first two hex bytes of x into a byte. -func xtob(x string) (byte, bool) { - b1 := xvalues[x[0]] - b2 := xvalues[x[1]] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go deleted file mode 100644 index 33700042..00000000 --- a/vendor/github.com/pborman/uuid/uuid.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "io" - - guuid "github.com/google/uuid" -) - -// Array is a pass-by-value UUID that can be used as an effecient key in a map. -type Array [16]byte - -// UUID converts uuid into a slice. -func (uuid Array) UUID() UUID { - return uuid[:] -} - -// String returns the string representation of uuid, -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (uuid Array) String() string { - return guuid.UUID(uuid).String() -} - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID []byte - -// A Version represents a UUIDs version. -type Version = guuid.Version - -// A Variant represents a UUIDs variant. -type Variant = guuid.Variant - -// Constants returned by Variant. -const ( - Invalid = guuid.Invalid // Invalid UUID - RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 - Reserved = guuid.Reserved // Reserved, NCS backward compatibility. - Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future = guuid.Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// New returns a new random (version 4) UUID as a string. It is a convenience -// function for NewRandom().String(). -func New() string { - return NewRandom().String() -} - -// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for -// the formats parsed. -func Parse(s string) UUID { - gu, err := guuid.Parse(s) - if err == nil { - return gu[:] - } - return nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
-func ParseBytes(b []byte) (UUID, error) { - gu, err := guuid.ParseBytes(b) - if err == nil { - return gu[:], nil - } - return nil, err -} - -// Equal returns true if uuid1 and uuid2 are equal. -func Equal(uuid1, uuid2 UUID) bool { - return bytes.Equal(uuid1, uuid2) -} - -// Array returns an array representation of uuid that can be used as a map key. -// Array panics if uuid is not valid. -func (uuid UUID) Array() Array { - if len(uuid) != 16 { - panic("invalid uuid") - } - var a Array - copy(a[:], uuid) - return a -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - if len(uuid) != 16 { - return "" - } - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - if len(uuid) != 16 { - return "" - } - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. It returns Invalid if -// uuid is invalid. -func (uuid UUID) Variant() Variant { - if len(uuid) != 16 { - return Invalid - } - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. It returns false if uuid is not -// valid. -func (uuid UUID) Version() (Version, bool) { - if len(uuid) != 16 { - return 0, false - } - return Version(uuid[6] >> 4), true -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - guuid.SetRand(r) -} diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go deleted file mode 100644 index 7af948da..00000000 --- a/vendor/github.com/pborman/uuid/version1.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - guuid "github.com/google/uuid" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil. -func NewUUID() UUID { - gu, err := guuid.NewUUID() - if err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go deleted file mode 100644 index b459d46d..00000000 --- a/vendor/github.com/pborman/uuid/version4.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2011 Google Inc. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import guuid "github.com/google/uuid" - -// Random returns a Random (Version 4) UUID or panics. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() UUID { - if gu, err := guuid.NewRandom(); err == nil { - return UUID(gu[:]) - } - return nil -} diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE deleted file mode 100644 index 41ce7f16..00000000 --- a/vendor/github.com/peterbourgon/diskv/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2011-2012 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md deleted file mode 100644 index 3474739e..00000000 --- a/vendor/github.com/peterbourgon/diskv/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# What is diskv? - -Diskv (disk-vee) is a simple, persistent key-value store written in the Go -language. It starts with an incredibly simple API for storing arbitrary data on -a filesystem by key, and builds several layers of performance-enhancing -abstraction on top. The end result is a conceptually simple, but highly -performant, disk-backed storage system. - -[![Build Status][1]][2] - -[1]: https://drone.io/github.com/peterbourgon/diskv/status.png -[2]: https://drone.io/github.com/peterbourgon/diskv/latest - - -# Installing - -Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. -Then, - -```bash -$ go get github.com/peterbourgon/diskv -``` - -[3]: http://golang.org -[4]: http://golang.org/doc/install/source -[5]: http://golang.org/doc/install - - -# Usage - -```go -package main - -import ( - "fmt" - "github.com/peterbourgon/diskv" -) - -func main() { - // Simplest transform function: put all the data files into the base dir. - flatTransform := func(s string) []string { return []string{} } - - // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. 
- d := diskv.New(diskv.Options{ - BasePath: "my-data-dir", - Transform: flatTransform, - CacheSizeMax: 1024 * 1024, - }) - - // Write three bytes to the key "alpha". - key := "alpha" - d.Write(key, []byte{'1', '2', '3'}) - - // Read the value back out of the store. - value, _ := d.Read(key) - fmt.Printf("%v\n", value) - - // Erase the key+value from the store (and the disk). - d.Erase(key) -} -``` - -More complex examples can be found in the "examples" subdirectory. - - -# Theory - -## Basic idea - -At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). -The data is written to a single file on disk, with the same name as the key. -The key determines where that file will be stored, via a user-provided -`TransformFunc`, which takes a key and returns a slice (`[]string`) -corresponding to a path list where the key file will be stored. The simplest -TransformFunc, - -```go -func SimpleTransform (key string) []string { - return []string{} -} -``` - -will place all keys in the same, base directory. The design is inspired by -[Redis diskstore][6]; a TransformFunc which emulates the default diskstore -behavior is available in the content-addressable-storage example. - -[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 - -**Note** that your TransformFunc should ensure that one valid key doesn't -transform to a subset of another valid key. That is, it shouldn't be possible -to construct valid keys that resolve to directory names. As a concrete example, -if your TransformFunc splits on every 3 characters, then - -```go -d.Write("abcabc", val) // OK: written to /abc/abc/abcabc -d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory -``` - -This will be addressed in an upcoming version of diskv. - -Probably the most important design principle behind diskv is that your data is -always flatly available on the disk. diskv will never do anything that would -prevent you from accessing, copying, backing up, or otherwise interacting with -your data via common UNIX commandline tools. - -## Adding a cache - -An in-memory caching layer is provided by combining the BasicStore -functionality with a simple map structure, and keeping it up-to-date as -appropriate. Since the map structure in Go is not threadsafe, it's combined -with a RWMutex to provide safe concurrent access. - -## Adding order - -diskv is a key-value store and therefore inherently unordered. An ordering -system can be injected into the store by passing something which satisfies the -diskv.Index interface. (A default implementation, using Google's -[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a -user-provided Less function) index of the keys, which can be queried. - -[7]: https://github.com/google/btree - -## Adding compression - -Something which implements the diskv.Compression interface may be passed -during store creation, so that all Writes and Reads are filtered through -a compression/decompression pipeline. Several default implementations, -using stdlib compression algorithms, are provided. Note that data is cached -compressed; the cost of decompression is borne with each Read. - -## Streaming - -diskv also now provides ReadStream and WriteStream methods, to allow very large -data to be handled efficiently. - - -# Future plans - - * Needs plenty of robust testing: huge datasets, etc... 
- * More thorough benchmarking - * Your suggestions for use-cases I haven't thought of diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go deleted file mode 100644 index 5192b027..00000000 --- a/vendor/github.com/peterbourgon/diskv/compression.go +++ /dev/null @@ -1,64 +0,0 @@ -package diskv - -import ( - "compress/flate" - "compress/gzip" - "compress/zlib" - "io" -) - -// Compression is an interface that Diskv uses to implement compression of -// data. Writer takes a destination io.Writer and returns a WriteCloser that -// compresses all data written through it. Reader takes a source io.Reader and -// returns a ReadCloser that decompresses all data read through it. You may -// define these methods on your own type, or use one of the NewCompression -// helpers. -type Compression interface { - Writer(dst io.Writer) (io.WriteCloser, error) - Reader(src io.Reader) (io.ReadCloser, error) -} - -// NewGzipCompression returns a Gzip-based Compression. -func NewGzipCompression() Compression { - return NewGzipCompressionLevel(flate.DefaultCompression) -} - -// NewGzipCompressionLevel returns a Gzip-based Compression with the given level. -func NewGzipCompressionLevel(level int) Compression { - return &genericCompression{ - wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) }, - rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }, - } -} - -// NewZlibCompression returns a Zlib-based Compression. -func NewZlibCompression() Compression { - return NewZlibCompressionLevel(flate.DefaultCompression) -} - -// NewZlibCompressionLevel returns a Zlib-based Compression with the given level. -func NewZlibCompressionLevel(level int) Compression { - return NewZlibCompressionLevelDict(level, nil) -} - -// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given -// level, based on the given dictionary. -func NewZlibCompressionLevelDict(level int, dict []byte) Compression { - return &genericCompression{ - func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, - func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, - } -} - -type genericCompression struct { - wf func(w io.Writer) (io.WriteCloser, error) - rf func(r io.Reader) (io.ReadCloser, error) -} - -func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) { - return g.wf(dst) -} - -func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) { - return g.rf(src) -} diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go deleted file mode 100644 index 524dc0a6..00000000 --- a/vendor/github.com/peterbourgon/diskv/diskv.go +++ /dev/null @@ -1,624 +0,0 @@ -// Diskv (disk-vee) is a simple, persistent, key-value store. -// It stores all data flatly on the filesystem. 
- -package diskv - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - "syscall" -) - -const ( - defaultBasePath = "diskv" - defaultFilePerm os.FileMode = 0666 - defaultPathPerm os.FileMode = 0777 -) - -var ( - defaultTransform = func(s string) []string { return []string{} } - errCanceled = errors.New("canceled") - errEmptyKey = errors.New("empty key") - errBadKey = errors.New("bad key") - errImportDirectory = errors.New("can't import a directory") -) - -// TransformFunction transforms a key into a slice of strings, with each -// element in the slice representing a directory in the file path where the -// key's entry will eventually be stored. -// -// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"], -// the final location of the data file will be /ab/cde/f/abcdef -type TransformFunction func(s string) []string - -// Options define a set of properties that dictate Diskv behavior. -// All values are optional. -type Options struct { - BasePath string - Transform TransformFunction - CacheSizeMax uint64 // bytes - PathPerm os.FileMode - FilePerm os.FileMode - // If TempDir is set, it will enable filesystem atomic writes by - // writing temporary files to that location before being moved - // to BasePath. - // Note that TempDir MUST be on the same device/partition as - // BasePath. - TempDir string - - Index Index - IndexLess LessFunction - - Compression Compression -} - -// Diskv implements the Diskv interface. You shouldn't construct Diskv -// structures directly; instead, use the New constructor. -type Diskv struct { - Options - mu sync.RWMutex - cache map[string][]byte - cacheSize uint64 -} - -// New returns an initialized Diskv structure, ready to use. -// If the path identified by baseDir already contains data, -// it will be accessible, but not yet cached. -func New(o Options) *Diskv { - if o.BasePath == "" { - o.BasePath = defaultBasePath - } - if o.Transform == nil { - o.Transform = defaultTransform - } - if o.PathPerm == 0 { - o.PathPerm = defaultPathPerm - } - if o.FilePerm == 0 { - o.FilePerm = defaultFilePerm - } - - d := &Diskv{ - Options: o, - cache: map[string][]byte{}, - cacheSize: 0, - } - - if d.Index != nil && d.IndexLess != nil { - d.Index.Initialize(d.IndexLess, d.Keys(nil)) - } - - return d -} - -// Write synchronously writes the key-value pair to disk, making it immediately -// available for reads. Write relies on the filesystem to perform an eventual -// sync to physical media. If you need stronger guarantees, see WriteStream. -func (d *Diskv) Write(key string, val []byte) error { - return d.WriteStream(key, bytes.NewBuffer(val), false) -} - -// WriteStream writes the data represented by the io.Reader to the disk, under -// the provided key. If sync is true, WriteStream performs an explicit sync on -// the file as soon as it's written. -// -// bytes.Buffer provides io.Reader semantics for basic data types. -func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { - if len(key) <= 0 { - return errEmptyKey - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.writeStreamWithLock(key, r, sync) -} - -// createKeyFileWithLock either creates the key file directly, or -// creates a temporary file in TempDir if it is set. 
-func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) { - if d.TempDir != "" { - if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil { - return nil, fmt.Errorf("temp mkdir: %s", err) - } - f, err := ioutil.TempFile(d.TempDir, "") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - - if err := f.Chmod(d.FilePerm); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return nil, fmt.Errorf("chmod: %s", err) - } - return f, nil - } - - mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists - f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm) - if err != nil { - return nil, fmt.Errorf("open file: %s", err) - } - return f, nil -} - -// writeStream does no input validation checking. -func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error { - if err := d.ensurePathWithLock(key); err != nil { - return fmt.Errorf("ensure path: %s", err) - } - - f, err := d.createKeyFileWithLock(key) - if err != nil { - return fmt.Errorf("create key file: %s", err) - } - - wc := io.WriteCloser(&nopWriteCloser{f}) - if d.Compression != nil { - wc, err = d.Compression.Writer(f) - if err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("compression writer: %s", err) - } - } - - if _, err := io.Copy(wc, r); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("i/o copy: %s", err) - } - - if err := wc.Close(); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("compression close: %s", err) - } - - if sync { - if err := f.Sync(); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("file sync: %s", err) - } - } - - if err := f.Close(); err != nil { - return fmt.Errorf("file close: %s", err) - } - - if f.Name() != d.completeFilename(key) { - if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil { - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("rename: %s", err) - } - } - - if d.Index != nil { - d.Index.Insert(key) - } - - d.bustCacheWithLock(key) // cache only on read - - return nil -} - -// Import imports the source file into diskv under the destination key. If the -// destination key already exists, it's overwritten. If move is true, the -// source file is removed after a successful import. -func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) { - if dstKey == "" { - return errEmptyKey - } - - if fi, err := os.Stat(srcFilename); err != nil { - return err - } else if fi.IsDir() { - return errImportDirectory - } - - d.mu.Lock() - defer d.mu.Unlock() - - if err := d.ensurePathWithLock(dstKey); err != nil { - return fmt.Errorf("ensure path: %s", err) - } - - if move { - if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil { - d.bustCacheWithLock(dstKey) - return nil - } else if err != syscall.EXDEV { - // If it failed due to being on a different device, fall back to copying - return err - } - } - - f, err := os.Open(srcFilename) - if err != nil { - return err - } - defer f.Close() - err = d.writeStreamWithLock(dstKey, f, false) - if err == nil && move { - err = os.Remove(srcFilename) - } - return err -} - -// Read reads the key and returns the value. 
-// If the key is available in the cache, Read won't touch the disk. -// If the key is not in the cache, Read will have the side-effect of -// lazily caching the value. -func (d *Diskv) Read(key string) ([]byte, error) { - rc, err := d.ReadStream(key, false) - if err != nil { - return []byte{}, err - } - defer rc.Close() - return ioutil.ReadAll(rc) -} - -// ReadStream reads the key and returns the value (data) as an io.ReadCloser. -// If the value is cached from a previous read, and direct is false, -// ReadStream will use the cached value. Otherwise, it will return a handle to -// the file on disk, and cache the data on read. -// -// If direct is true, ReadStream will lazily delete any cached value for the -// key, and return a direct handle to the file on disk. -// -// If compression is enabled, ReadStream taps into the io.Reader stream prior -// to decompression, and caches the compressed data. -func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { - d.mu.RLock() - defer d.mu.RUnlock() - - if val, ok := d.cache[key]; ok { - if !direct { - buf := bytes.NewBuffer(val) - if d.Compression != nil { - return d.Compression.Reader(buf) - } - return ioutil.NopCloser(buf), nil - } - - go func() { - d.mu.Lock() - defer d.mu.Unlock() - d.uncacheWithLock(key, uint64(len(val))) - }() - } - - return d.readWithRLock(key) -} - -// read ignores the cache, and returns an io.ReadCloser representing the -// decompressed data for the given key, streamed from the disk. Clients should -// acquire a read lock on the Diskv and check the cache themselves before -// calling read. -func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) { - filename := d.completeFilename(key) - - fi, err := os.Stat(filename) - if err != nil { - return nil, err - } - if fi.IsDir() { - return nil, os.ErrNotExist - } - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - - var r io.Reader - if d.CacheSizeMax > 0 { - r = newSiphon(f, d, key) - } else { - r = &closingReader{f} - } - - var rc = io.ReadCloser(ioutil.NopCloser(r)) - if d.Compression != nil { - rc, err = d.Compression.Reader(r) - if err != nil { - return nil, err - } - } - - return rc, nil -} - -// closingReader provides a Reader that automatically closes the -// embedded ReadCloser when it reaches EOF -type closingReader struct { - rc io.ReadCloser -} - -func (cr closingReader) Read(p []byte) (int, error) { - n, err := cr.rc.Read(p) - if err == io.EOF { - if closeErr := cr.rc.Close(); closeErr != nil { - return n, closeErr // close must succeed for Read to succeed - } - } - return n, err -} - -// siphon is like a TeeReader: it copies all data read through it to an -// internal buffer, and moves that buffer to the cache at EOF. -type siphon struct { - f *os.File - d *Diskv - key string - buf *bytes.Buffer -} - -// newSiphon constructs a siphoning reader that represents the passed file. -// When a successful series of reads ends in an EOF, the siphon will write -// the buffered data to Diskv's cache under the given key. -func newSiphon(f *os.File, d *Diskv, key string) io.Reader { - return &siphon{ - f: f, - d: d, - key: key, - buf: &bytes.Buffer{}, - } -} - -// Read implements the io.Reader interface for siphon. 
-func (s *siphon) Read(p []byte) (int, error) { - n, err := s.f.Read(p) - - if err == nil { - return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed - } - - if err == io.EOF { - s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail - if closeErr := s.f.Close(); closeErr != nil { - return n, closeErr // close must succeed for Read to succeed - } - return n, err - } - - return n, err -} - -// Erase synchronously erases the given key from the disk and the cache. -func (d *Diskv) Erase(key string) error { - d.mu.Lock() - defer d.mu.Unlock() - - d.bustCacheWithLock(key) - - // erase from index - if d.Index != nil { - d.Index.Delete(key) - } - - // erase from disk - filename := d.completeFilename(key) - if s, err := os.Stat(filename); err == nil { - if s.IsDir() { - return errBadKey - } - if err = os.Remove(filename); err != nil { - return err - } - } else { - // Return err as-is so caller can do os.IsNotExist(err). - return err - } - - // clean up and return - d.pruneDirsWithLock(key) - return nil -} - -// EraseAll will delete all of the data from the store, both in the cache and on -// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- -// diskv-related data. Care should be taken to always specify a diskv base -// directory that is exclusively for diskv data. -func (d *Diskv) EraseAll() error { - d.mu.Lock() - defer d.mu.Unlock() - d.cache = make(map[string][]byte) - d.cacheSize = 0 - if d.TempDir != "" { - os.RemoveAll(d.TempDir) // errors ignored - } - return os.RemoveAll(d.BasePath) -} - -// Has returns true if the given key exists. -func (d *Diskv) Has(key string) bool { - d.mu.Lock() - defer d.mu.Unlock() - - if _, ok := d.cache[key]; ok { - return true - } - - filename := d.completeFilename(key) - s, err := os.Stat(filename) - if err != nil { - return false - } - if s.IsDir() { - return false - } - - return true -} - -// Keys returns a channel that will yield every key accessible by the store, -// in undefined order. If a cancel channel is provided, closing it will -// terminate and close the keys channel. -func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { - return d.KeysPrefix("", cancel) -} - -// KeysPrefix returns a channel that will yield every key accessible by the -// store with the given prefix, in undefined order. If a cancel channel is -// provided, closing it will terminate and close the keys channel. If the -// provided prefix is the empty string, all keys will be yielded. -func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { - var prepath string - if prefix == "" { - prepath = d.BasePath - } else { - prepath = d.pathFor(prefix) - } - c := make(chan string) - go func() { - filepath.Walk(prepath, walker(c, prefix, cancel)) - close(c) - }() - return c -} - -// walker returns a function which satisfies the filepath.WalkFunc interface. -// It sends every non-directory file entry down the channel c. -func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { - return func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) { - return nil // "pass" - } - - select { - case c <- info.Name(): - case <-cancel: - return errCanceled - } - - return nil - } -} - -// pathFor returns the absolute path for location on the filesystem where the -// data for the given key will be stored. 
-func (d *Diskv) pathFor(key string) string { - return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...)) -} - -// ensurePathWithLock is a helper function that generates all necessary -// directories on the filesystem for the given key. -func (d *Diskv) ensurePathWithLock(key string) error { - return os.MkdirAll(d.pathFor(key), d.PathPerm) -} - -// completeFilename returns the absolute path to the file for the given key. -func (d *Diskv) completeFilename(key string) string { - return filepath.Join(d.pathFor(key), key) -} - -// cacheWithLock attempts to cache the given key-value pair in the store's -// cache. It can fail if the value is larger than the cache's maximum size. -func (d *Diskv) cacheWithLock(key string, val []byte) error { - valueSize := uint64(len(val)) - if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { - return fmt.Errorf("%s; not caching", err) - } - - // be very strict about memory guarantees - if (d.cacheSize + valueSize) > d.CacheSizeMax { - panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) - } - - d.cache[key] = val - d.cacheSize += valueSize - return nil -} - -// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. -func (d *Diskv) cacheWithoutLock(key string, val []byte) error { - d.mu.Lock() - defer d.mu.Unlock() - return d.cacheWithLock(key, val) -} - -func (d *Diskv) bustCacheWithLock(key string) { - if val, ok := d.cache[key]; ok { - d.uncacheWithLock(key, uint64(len(val))) - } -} - -func (d *Diskv) uncacheWithLock(key string, sz uint64) { - d.cacheSize -= sz - delete(d.cache, key) -} - -// pruneDirsWithLock deletes empty directories in the path walk leading to the -// key k. Typically this function is called after an Erase is made. -func (d *Diskv) pruneDirsWithLock(key string) error { - pathlist := d.Transform(key) - for i := range pathlist { - dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) - - // thanks to Steven Blenkinsop for this snippet - switch fi, err := os.Stat(dir); true { - case err != nil: - return err - case !fi.IsDir(): - panic(fmt.Sprintf("corrupt dirstate at %s", dir)) - } - - nlinks, err := filepath.Glob(filepath.Join(dir, "*")) - if err != nil { - return err - } else if len(nlinks) > 0 { - return nil // has subdirs -- do not prune - } - if err = os.Remove(dir); err != nil { - return err - } - } - - return nil -} - -// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order -// until the cache has at least valueSize bytes available. -func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { - if valueSize > d.CacheSizeMax { - return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) - } - - safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } - - for key, val := range d.cache { - if safe() { - break - } - - d.uncacheWithLock(key, uint64(len(val))) - } - - if !safe() { - panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) - } - - return nil -} - -// nopWriteCloser wraps an io.Writer and provides a no-op Close method to -// satisfy the io.WriteCloser interface. 
-type nopWriteCloser struct { - io.Writer -} - -func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } -func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go deleted file mode 100644 index 96fee515..00000000 --- a/vendor/github.com/peterbourgon/diskv/index.go +++ /dev/null @@ -1,115 +0,0 @@ -package diskv - -import ( - "sync" - - "github.com/google/btree" -) - -// Index is a generic interface for things that can -// provide an ordered list of keys. -type Index interface { - Initialize(less LessFunction, keys <-chan string) - Insert(key string) - Delete(key string) - Keys(from string, n int) []string -} - -// LessFunction is used to initialize an Index of keys in a specific order. -type LessFunction func(string, string) bool - -// btreeString is a custom data type that satisfies the BTree Less interface, -// making the strings it wraps sortable by the BTree package. -type btreeString struct { - s string - l LessFunction -} - -// Less satisfies the BTree.Less interface using the btreeString's LessFunction. -func (s btreeString) Less(i btree.Item) bool { - return s.l(s.s, i.(btreeString).s) -} - -// BTreeIndex is an implementation of the Index interface using google/btree. -type BTreeIndex struct { - sync.RWMutex - LessFunction - *btree.BTree -} - -// Initialize populates the BTree tree with data from the keys channel, -// according to the passed less function. It's destructive to the BTreeIndex. -func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { - i.Lock() - defer i.Unlock() - i.LessFunction = less - i.BTree = rebuild(less, keys) -} - -// Insert inserts the given key (only) into the BTree tree. -func (i *BTreeIndex) Insert(key string) { - i.Lock() - defer i.Unlock() - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) -} - -// Delete removes the given key (only) from the BTree tree. -func (i *BTreeIndex) Delete(key string) { - i.Lock() - defer i.Unlock() - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) -} - -// Keys yields a maximum of n keys in order. If the passed 'from' key is empty, -// Keys will return the first n keys. If the passed 'from' key is non-empty, the -// first key in the returned slice will be the key that immediately follows the -// passed key, in key order. -func (i *BTreeIndex) Keys(from string, n int) []string { - i.RLock() - defer i.RUnlock() - - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - - if i.BTree.Len() <= 0 { - return []string{} - } - - btreeFrom := btreeString{s: from, l: i.LessFunction} - skipFirst := true - if len(from) <= 0 || !i.BTree.Has(btreeFrom) { - // no such key, so fabricate an always-smallest item - btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} - skipFirst = false - } - - keys := []string{} - iterator := func(i btree.Item) bool { - keys = append(keys, i.(btreeString).s) - return len(keys) < n - } - i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) - - if skipFirst && len(keys) > 0 { - keys = keys[1:] - } - - return keys -} - -// rebuildIndex does the work of regenerating the index -// with the given keys. 
-func rebuild(less LessFunction, keys <-chan string) *btree.BTree { - tree := btree.New(2) - for key := range keys { - tree.ReplaceOrInsert(btreeString{s: key, l: less}) - } - return tree -} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 00000000..c7f8c7e6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,222 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// In Go 1.13, the ed25519 package was promoted to the standard library as +// crypto/ed25519, and this package became a wrapper for the standard library one. +// +// +build !go1.13 + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +package ed25519 + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +import ( + "bytes" + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "errors" + "io" + "strconv" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:32]) + return seed +} + +// Sign signs the given message with priv. +// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. 
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptorand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make([]byte, PublicKeySize) + copy(publicKey, privateKey[32:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + if l := len(seed); l != SeedSize { + panic("ed25519: bad seed length: " + strconv.Itoa(l)) + } + + digest := sha512.Sum512(seed) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + privateKey := make([]byte, PrivateKeySize) + copy(privateKey, seed) + copy(privateKey[32:], publicKeyBytes[:]) + + return privateKey +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. 
+func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var s [32]byte + copy(s[:], sig[32:]) + + // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in + // the range [0, order) in order to prevent signature malleability. + if !edwards25519.ScMinimal(&s) { + return false + } + + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) + + var checkR [32]byte + R.ToBytes(&checkR) + return bytes.Equal(sig[:32], checkR[:]) +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go new file mode 100644 index 00000000..d1448d8d --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go @@ -0,0 +1,73 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. +package ed25519 + +import ( + "crypto/ed25519" + "io" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + return ed25519.GenerateKey(rand) +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. 
This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + return ed25519.NewKeyFromSeed(seed) +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + return ed25519.Sign(privateKey, message) +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + return ed25519.Verify(publicKey, message, sig) +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go new file mode 100644 index 00000000..e39f086c --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, 
+ { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 00000000..fd03c252 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1793 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package edwards25519 + +import "encoding/binary" + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r<2^255. +// +// Write x=r+19(2^(-255))r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func FeToBytes(s *[32]byte, h *FieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
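The FeMul notes above lean on two facts: a FieldElement t stands for the integer t[0]+2^26 t[1]+2^51 t[2]+...+2^230 t[9], and 2^255 is congruent to 19 modulo p = 2^255-19, which is why partial products that spill past the top limb are folded back in with a factor of 19 (the g*_19 precomputations). A minimal standalone Go sketch, separate from the vendored file (package main, feValue and offsets are names invented here), that checks both facts with math/big:

package main

import (
	"fmt"
	"math/big"
)

// offsets[i] is the bit position of limb i in the integer a FieldElement
// represents: t[0] + 2^26 t[1] + 2^51 t[2] + ... + 2^230 t[9].
var offsets = [10]uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}

// feValue evaluates the integer represented by ten int32 limbs.
func feValue(t [10]int32) *big.Int {
	v := new(big.Int)
	for i, limb := range t {
		v.Add(v, new(big.Int).Lsh(big.NewInt(int64(limb)), offsets[i]))
	}
	return v
}

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19)) // 2^255 - 19

	// The element written by FeOne represents the integer 1.
	fmt.Println(feValue([10]int32{1})) // 1

	// 2^255 mod p equals 19, the identity behind multiplying wrapped-around
	// limbs of g by 19 instead of carrying a 2^255 term.
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 255), p)) // 19
}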
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
+func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. 
+// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise.
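GeDoubleScalarMultVartime is the verification-side workhorse: Ed25519 verification recomputes R' = h*(-A) + s*B, where h is the reduced SHA-512(R||A||M) digest, A is the public-key point (negated with FeNeg before the call), and s is the second half of the signature, then compares the encoding of R' against the R in the signature. A sketch of that call pattern, assuming hReduced, A and s have already been prepared as just described (names are illustrative):

    var R ProjectiveGroupElement
    GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) // R' = h*(-A) + s*B
    var checkR [32]byte
    R.ToBytes(&checkR) // accept only if checkR equals the signature's first 32 bytes

The helpers that follow (negative, equal, selectPoint) are the constant-time table lookups used by the fixed-base path, GeScalarMultBase.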
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
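The contract above is the scalar half of Ed25519: l is the group order, and signing uses ScReduce (defined further down) to bring 64-byte SHA-512 digests into scalar range before computing S = (h*a + r) mod l with a single ScMulAdd call. A rough sketch of that pattern, with hram and nonce as hypothetical 64-byte digests and a as the 32-byte secret scalar:

    var hram, nonce [64]byte // SHA-512(R||A||M) and the per-message nonce digest (placeholders)
    var a, hReduced, nonceReduced, S [32]byte
    ScReduce(&hReduced, &hram)
    ScReduce(&nonceReduced, &nonce)
    ScMulAdd(&S, &hReduced, &a, &nonceReduced) // S = (h*a + nonce) mod l

ScMulAdd below implements exactly the (ab+c) mod l contract stated above.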
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// order is the order of Curve25519 in little-endian form. +var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} + +// ScMinimal returns true if the given scalar is less than the order of the +// curve. +func ScMinimal(scalar *[32]byte) bool { + for i := 3; ; i-- { + v := binary.LittleEndian.Uint64(scalar[i*8:]) + if v > order[i] { + return false + } else if v < order[i] { + break + } else if i == 0 { + return false + } + } + + return true +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go index 9d666ffc..2f04ee5b 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -7,6 +7,7 @@ package terminal import ( "bytes" "io" + "strconv" "sync" "unicode/utf8" ) @@ -271,34 +272,44 @@ func (t *Terminal) moveCursorToPos(pos int) { } func (t *Terminal) move(up, down, left, right int) { - movement := make([]rune, 3*(up+down+left+right)) - m := movement - for i := 0; i < up; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'A' - m = m[3:] - } - for i := 0; i < down; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'B' - m = m[3:] - } - for i := 0; i < left; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'D' - m = m[3:] - } - for i := 0; i < right; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'C' - m = m[3:] - } - - t.queue(movement) + m := []rune{} + + // 1 unit up can be expressed as ^[[A or ^[A + // 5 units up can be expressed as ^[[5A + + if up == 1 { + m = append(m, keyEscape, '[', 'A') + } else if up > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(up))...) + m = append(m, 'A') + } + + if down == 1 { + m = append(m, keyEscape, '[', 'B') + } else if down > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(down))...) + m = append(m, 'B') + } + + if right == 1 { + m = append(m, keyEscape, '[', 'C') + } else if right > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(right))...) + m = append(m, 'C') + } + + if left == 1 { + m = append(m, keyEscape, '[', 'D') + } else if left > 1 { + m = append(m, keyEscape, '[') + m = append(m, []rune(strconv.Itoa(left))...) + m = append(m, 'D') + } + + t.queue(m) } func (t *Terminal) clearLineToRight() { diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go new file mode 100644 index 00000000..15e21b18 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/asm.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// Assemble converts insts into raw instructions suitable for loading +// into a BPF virtual machine. +// +// Currently, no optimization is attempted, the assembled program flow +// is exactly as provided. 
+func Assemble(insts []Instruction) ([]RawInstruction, error) { + ret := make([]RawInstruction, len(insts)) + var err error + for i, inst := range insts { + ret[i], err = inst.Assemble() + if err != nil { + return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) + } + } + return ret, nil +} + +// Disassemble attempts to parse raw back into +// Instructions. Unrecognized RawInstructions are assumed to be an +// extension not implemented by this package, and are passed through +// unchanged to the output. The allDecoded value reports whether insts +// contains no RawInstructions. +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { + insts = make([]Instruction, len(raw)) + allDecoded = true + for i, r := range raw { + insts[i] = r.Disassemble() + if _, ok := insts[i].(RawInstruction); ok { + allDecoded = false + } + } + return insts, allDecoded +} diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go new file mode 100644 index 00000000..12f3ee83 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/constants.go @@ -0,0 +1,222 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Register is a register of the BPF virtual machine. +type Register uint16 + +const ( + // RegA is the accumulator register. RegA is always the + // destination register of ALU operations. + RegA Register = iota + // RegX is the indirection register, used by LoadIndirect + // operations. + RegX +) + +// An ALUOp is an arithmetic or logic operation. +type ALUOp uint16 + +// ALU binary operation types. +const ( + ALUOpAdd ALUOp = iota << 4 + ALUOpSub + ALUOpMul + ALUOpDiv + ALUOpOr + ALUOpAnd + ALUOpShiftLeft + ALUOpShiftRight + aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. + ALUOpMod + ALUOpXor +) + +// A JumpTest is a comparison operator used in conditional jumps. +type JumpTest uint16 + +// Supported operators for conditional jumps. +// K can be RegX for JumpIfX +const ( + // K == A + JumpEqual JumpTest = iota + // K != A + JumpNotEqual + // K > A + JumpGreaterThan + // K < A + JumpLessThan + // K >= A + JumpGreaterOrEqual + // K <= A + JumpLessOrEqual + // K & A != 0 + JumpBitsSet + // K & A == 0 + JumpBitsNotSet +) + +// An Extension is a function call provided by the kernel that +// performs advanced operations that are expensive or impossible +// within the BPF virtual machine. +// +// Extensions are only implemented by the Linux kernel. +// +// TODO: should we prune this list? Some of these extensions seem +// either broken or near-impossible to use correctly, whereas other +// (len, random, ifindex) are quite useful. +type Extension int + +// Extension functions available in the Linux kernel. +const ( + // extOffset is the negative maximum number of instructions used + // to load instructions by overloading the K argument. + extOffset = -0x1000 + // ExtLen returns the length of the packet. + ExtLen Extension = 1 + // ExtProto returns the packet's L3 protocol type. + ExtProto Extension = 0 + // ExtType returns the packet's type (skb->pkt_type in the kernel) + // + // TODO: better documentation. How nice an API do we want to + // provide for these esoteric extensions? + ExtType Extension = 4 + // ExtPayloadOffset returns the offset of the packet payload, or + // the first protocol header that the kernel does not know how to + // parse. 
+ ExtPayloadOffset Extension = 52 + // ExtInterfaceIndex returns the index of the interface on which + // the packet was received. + ExtInterfaceIndex Extension = 8 + // ExtNetlinkAttr returns the netlink attribute of type X at + // offset A. + ExtNetlinkAttr Extension = 12 + // ExtNetlinkAttrNested returns the nested netlink attribute of + // type X at offset A. + ExtNetlinkAttrNested Extension = 16 + // ExtMark returns the packet's mark value. + ExtMark Extension = 20 + // ExtQueue returns the packet's assigned hardware queue. + ExtQueue Extension = 24 + // ExtLinkLayerType returns the packet's hardware address type + // (e.g. Ethernet, Infiniband). + ExtLinkLayerType Extension = 28 + // ExtRXHash returns the packets receive hash. + // + // TODO: figure out what this rxhash actually is. + ExtRXHash Extension = 32 + // ExtCPUID returns the ID of the CPU processing the current + // packet. + ExtCPUID Extension = 36 + // ExtVLANTag returns the packet's VLAN tag. + ExtVLANTag Extension = 44 + // ExtVLANTagPresent returns non-zero if the packet has a VLAN + // tag. + // + // TODO: I think this might be a lie: it reads bit 0x1000 of the + // VLAN header, which changed meaning in recent revisions of the + // spec - this extension may now return meaningless information. + ExtVLANTagPresent Extension = 48 + // ExtVLANProto returns 0x8100 if the frame has a VLAN header, + // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some + // other value if no VLAN information is present. + ExtVLANProto Extension = 60 + // ExtRand returns a uniformly random uint32. + ExtRand Extension = 56 +) + +// The following gives names to various bit patterns used in opcode construction. + +const ( + opMaskCls uint16 = 0x7 + // opClsLoad masks + opMaskLoadDest = 0x01 + opMaskLoadWidth = 0x18 + opMaskLoadMode = 0xe0 + // opClsALU & opClsJump + opMaskOperand = 0x08 + opMaskOperator = 0xf0 +) + +const ( + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsLoadA uint16 = iota + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | + // +---------------+-----------------+---+---+---+ + opClsLoadX + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | + // +---+---+---+---+---+---+---+---+ + opClsStoreA + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | + // +---+---+---+---+---+---+---+---+ + opClsStoreX + // +---------------+-----------------+---+---+---+ + // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsALU + // +-----------------------------+---+---+---+---+ + // | TestOperator (4b) | 0 | 1 | 0 | 1 | + // +-----------------------------+---+---+---+---+ + opClsJump + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | + // +---+-------------------------+---+---+---+---+ + opClsReturn + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | + // +---+-------------------------+---+---+---+---+ + opClsMisc +) + +const ( + opAddrModeImmediate uint16 = iota << 5 + opAddrModeAbsolute + opAddrModeIndirect + opAddrModeScratch + opAddrModePacketLen // actually an extension, not an addressing mode. 
+ opAddrModeMemShift +) + +const ( + opLoadWidth4 uint16 = iota << 3 + opLoadWidth2 + opLoadWidth1 +) + +// Operand for ALU and Jump instructions +type opOperand uint16 + +// Supported operand sources. +const ( + opOperandConstant opOperand = iota << 3 + opOperandX +) + +// An jumpOp is a conditional jump condition. +type jumpOp uint16 + +// Supported jump conditions. +const ( + opJumpAlways jumpOp = iota << 4 + opJumpEqual + opJumpGT + opJumpGE + opJumpSet +) + +const ( + opRetSrcConstant uint16 = iota << 4 + opRetSrcA +) + +const ( + opMiscTAX = 0x00 + opMiscTXA = 0x80 +) diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go new file mode 100644 index 00000000..ae62feb5 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -0,0 +1,82 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package bpf implements marshaling and unmarshaling of programs for the +Berkeley Packet Filter virtual machine, and provides a Go implementation +of the virtual machine. + +BPF's main use is to specify a packet filter for network taps, so that +the kernel doesn't have to expensively copy every packet it sees to +userspace. However, it's been repurposed to other areas where running +user code in-kernel is needed. For example, Linux's seccomp uses BPF +to apply security policies to system calls. For simplicity, this +documentation refers only to packets, but other uses of BPF have their +own data payloads. + +BPF programs run in a restricted virtual machine. It has almost no +access to kernel functions, and while conditional branches are +allowed, they can only jump forwards, to guarantee that there are no +infinite loops. + +The virtual machine + +The BPF VM is an accumulator machine. Its main register, called +register A, is an implicit source and destination in all arithmetic +and logic operations. The machine also has 16 scratch registers for +temporary storage, and an indirection register (register X) for +indirect memory access. All registers are 32 bits wide. + +Each run of a BPF program is given one packet, which is placed in the +VM's read-only "main memory". LoadAbsolute and LoadIndirect +instructions can fetch up to 32 bits at a time into register A for +examination. + +The goal of a BPF program is to produce and return a verdict (uint32), +which tells the kernel what to do with the packet. In the context of +packet filtering, the returned value is the number of bytes of the +packet to forward to userspace, or 0 to ignore the packet. Other +contexts like seccomp define their own return values. + +In order to simplify programs, attempts to read past the end of the +packet terminate the program execution with a verdict of 0 (ignore +packet). This means that the vast majority of BPF programs don't need +to do any explicit bounds checking. + +In addition to the bytes of the packet, some BPF programs have access +to extensions, which are essentially calls to kernel utility +functions. Currently, the only extensions supported by this package +are the Linux packet filter extensions. + +Examples + +This packet filter selects all ARP packets. + + bpf.Assemble([]bpf.Instruction{ + // Load "EtherType" field from the ethernet header. + bpf.LoadAbsolute{Off: 12, Size: 2}, + // Skip over the next instruction if EtherType is not ARP. + bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, + // Verdict is "send up to 4k of the packet to userspace." 
+ bpf.RetConstant{Val: 4096}, + // Verdict is "ignore packet." + bpf.RetConstant{Val: 0}, + }) + +This packet filter captures a random 1% sample of traffic. + + bpf.Assemble([]bpf.Instruction{ + // Get a 32-bit random number from the Linux kernel. + bpf.LoadExtension{Num: bpf.ExtRand}, + // 1% dice roll? + bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, + // Capture. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + +*/ +package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go new file mode 100644 index 00000000..3cffcaa0 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/instructions.go @@ -0,0 +1,726 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// An Instruction is one instruction executed by the BPF virtual +// machine. +type Instruction interface { + // Assemble assembles the Instruction into a RawInstruction. + Assemble() (RawInstruction, error) +} + +// A RawInstruction is a raw BPF virtual machine instruction. +type RawInstruction struct { + // Operation to execute. + Op uint16 + // For conditional jump instructions, the number of instructions + // to skip if the condition is true/false. + Jt uint8 + Jf uint8 + // Constant parameter. The meaning depends on the Op. + K uint32 +} + +// Assemble implements the Instruction Assemble method. +func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } + +// Disassemble parses ri into an Instruction and returns it. If ri is +// not recognized by this package, ri itself is returned. +func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + if ri.K > extOffset+0xffffffff { + return LoadExtension{Num: Extension(-extOffset + ri.K)} + } + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + switch operand := opOperand(ri.Op & opMaskOperand); operand { + case opOperandX: + return ALUOpX{Op: op} + case opOperandConstant: + return ALUOpConstant{Op: op, Val: ri.K} + default: + return ri + } + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + switch op := jumpOp(ri.Op & opMaskOperator); op { + case opJumpAlways: + 
return Jump{Skip: ri.K} + case opJumpEqual, opJumpGT, opJumpGE, opJumpSet: + cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf) + switch operand := opOperand(ri.Op & opMaskOperand); operand { + case opOperandX: + return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse} + case opOperandConstant: + return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse} + default: + return ri + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) { + var test JumpTest + + // Decode "fake" jump conditions that don't appear in machine code + // Ensures the Assemble -> Disassemble stage recreates the same instructions + // See https://github.com/golang/go/issues/18470 + if skipTrue == 0 { + switch op { + case opJumpEqual: + test = JumpNotEqual + case opJumpGT: + test = JumpLessOrEqual + case opJumpGE: + test = JumpLessThan + case opJumpSet: + test = JumpBitsNotSet + } + + return test, skipFalse, 0 + } + + switch op { + case opJumpEqual: + test = JumpEqual + case opJumpGT: + test = JumpGreaterThan + case opJumpGE: + test = JumpGreaterOrEqual + case opJumpSet: + test = JumpBitsSet + } + + return test, skipTrue, skipFalse +} + +// LoadConstant loads Val into register Dst. +type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// String returns the instruction in assembler notation. +func (a LoadConstant) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld #%d", a.Val) + case RegX: + return fmt.Sprintf("ldx #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadScratch loads scratch[N] into register Dst. +type LoadScratch struct { + Dst Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) +} + +// String returns the instruction in assembler notation. +func (a LoadScratch) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld M[%d]", a.N) + case RegX: + return fmt.Sprintf("ldx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into +// register A. +type LoadAbsolute struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadAbsolute) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) +} + +// String returns the instruction in assembler notation. 
+func (a LoadAbsolute) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [%d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [%d]", a.Off) + case 4: // word + if a.Off > extOffset+0xffffffff { + return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() + } + return fmt.Sprintf("ld [%d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value +// into register A. +type LoadIndirect struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadIndirect) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadIndirect) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [x + %d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [x + %d]", a.Off) + case 4: // word + return fmt.Sprintf("ld [x + %d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] +// by 4 and stores the result in register X. +// +// This instruction is mainly useful to load into X the length of an +// IPv4 packet header in a single instruction, rather than have to do +// the arithmetic on the header's first byte by hand. +type LoadMemShift struct { + Off uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadMemShift) Assemble() (RawInstruction, error) { + return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadMemShift) String() string { + return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) +} + +// LoadExtension invokes a linux-specific extension and stores the +// result in register A. +type LoadExtension struct { + Num Extension +} + +// Assemble implements the Instruction Assemble method. +func (a LoadExtension) Assemble() (RawInstruction, error) { + if a.Num == ExtLen { + return assembleLoad(RegA, 4, opAddrModePacketLen, 0) + } + return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) +} + +// String returns the instruction in assembler notation. +func (a LoadExtension) String() string { + switch a.Num { + case ExtLen: + return "ld #len" + case ExtProto: + return "ld #proto" + case ExtType: + return "ld #type" + case ExtPayloadOffset: + return "ld #poff" + case ExtInterfaceIndex: + return "ld #ifidx" + case ExtNetlinkAttr: + return "ld #nla" + case ExtNetlinkAttrNested: + return "ld #nlan" + case ExtMark: + return "ld #mark" + case ExtQueue: + return "ld #queue" + case ExtLinkLayerType: + return "ld #hatype" + case ExtRXHash: + return "ld #rxhash" + case ExtCPUID: + return "ld #cpu" + case ExtVLANTag: + return "ld #vlan_tci" + case ExtVLANTagPresent: + return "ld #vlan_avail" + case ExtVLANProto: + return "ld #vlan_tpid" + case ExtRand: + return "ld #rand" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// StoreScratch stores register Src into scratch[N]. +type StoreScratch struct { + Src Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. 
+func (a StoreScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + var op uint16 + switch a.Src { + case RegA: + op = opClsStoreA + case RegX: + op = opClsStoreX + default: + return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) + } + + return RawInstruction{ + Op: op, + K: uint32(a.N), + }, nil +} + +// String returns the instruction in assembler notation. +func (a StoreScratch) String() string { + switch a.Src { + case RegA: + return fmt.Sprintf("st M[%d]", a.N) + case RegX: + return fmt.Sprintf("stx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpConstant executes A = A Val. +type ALUOpConstant struct { + Op ALUOp + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op), + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a ALUOpConstant) String() string { + switch a.Op { + case ALUOpAdd: + return fmt.Sprintf("add #%d", a.Val) + case ALUOpSub: + return fmt.Sprintf("sub #%d", a.Val) + case ALUOpMul: + return fmt.Sprintf("mul #%d", a.Val) + case ALUOpDiv: + return fmt.Sprintf("div #%d", a.Val) + case ALUOpMod: + return fmt.Sprintf("mod #%d", a.Val) + case ALUOpAnd: + return fmt.Sprintf("and #%d", a.Val) + case ALUOpOr: + return fmt.Sprintf("or #%d", a.Val) + case ALUOpXor: + return fmt.Sprintf("xor #%d", a.Val) + case ALUOpShiftLeft: + return fmt.Sprintf("lsh #%d", a.Val) + case ALUOpShiftRight: + return fmt.Sprintf("rsh #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpX executes A = A X +type ALUOpX struct { + Op ALUOp +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(opOperandX) | uint16(a.Op), + }, nil +} + +// String returns the instruction in assembler notation. +func (a ALUOpX) String() string { + switch a.Op { + case ALUOpAdd: + return "add x" + case ALUOpSub: + return "sub x" + case ALUOpMul: + return "mul x" + case ALUOpDiv: + return "div x" + case ALUOpMod: + return "mod x" + case ALUOpAnd: + return "and x" + case ALUOpOr: + return "or x" + case ALUOpXor: + return "xor x" + case ALUOpShiftLeft: + return "lsh x" + case ALUOpShiftRight: + return "rsh x" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// NegateA executes A = -A. +type NegateA struct{} + +// Assemble implements the Instruction Assemble method. +func (a NegateA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(aluOpNeg), + }, nil +} + +// String returns the instruction in assembler notation. +func (a NegateA) String() string { + return fmt.Sprintf("neg") +} + +// Jump skips the following Skip instructions in the program. +type Jump struct { + Skip uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a Jump) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsJump | uint16(opJumpAlways), + K: a.Skip, + }, nil +} + +// String returns the instruction in assembler notation. +func (a Jump) String() string { + return fmt.Sprintf("ja %d", a.Skip) +} + +// JumpIf skips the following Skip instructions in the program if A +// Val is true. 
+type JumpIf struct { + Cond JumpTest + Val uint32 + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIf) Assemble() (RawInstruction, error) { + return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse) +} + +// String returns the instruction in assembler notation. +func (a JumpIf) String() string { + return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse) +} + +// JumpIfX skips the following Skip instructions in the program if A +// X is true. +type JumpIfX struct { + Cond JumpTest + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIfX) Assemble() (RawInstruction, error) { + return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse) +} + +// String returns the instruction in assembler notation. +func (a JumpIfX) String() string { + return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse) +} + +// jumpToRaw assembles a jump instruction into a RawInstruction +func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) { + var ( + cond jumpOp + flip bool + ) + switch test { + case JumpEqual: + cond = opJumpEqual + case JumpNotEqual: + cond, flip = opJumpEqual, true + case JumpGreaterThan: + cond = opJumpGT + case JumpLessThan: + cond, flip = opJumpGE, true + case JumpGreaterOrEqual: + cond = opJumpGE + case JumpLessOrEqual: + cond, flip = opJumpGT, true + case JumpBitsSet: + cond = opJumpSet + case JumpBitsNotSet: + cond, flip = opJumpSet, true + default: + return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test) + } + jt, jf := skipTrue, skipFalse + if flip { + jt, jf = jf, jt + } + return RawInstruction{ + Op: opClsJump | uint16(cond) | uint16(operand), + Jt: jt, + Jf: jf, + K: k, + }, nil +} + +// jumpToString converts a jump instruction to assembler notation +func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string { + switch cond { + // K == A + case JumpEqual: + return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq") + // K != A + case JumpNotEqual: + return fmt.Sprintf("jneq %s,%d", operand, skipTrue) + // K > A + case JumpGreaterThan: + return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle") + // K < A + case JumpLessThan: + return fmt.Sprintf("jlt %s,%d", operand, skipTrue) + // K >= A + case JumpGreaterOrEqual: + return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt") + // K <= A + case JumpLessOrEqual: + return fmt.Sprintf("jle %s,%d", operand, skipTrue) + // K & A != 0 + case JumpBitsSet: + if skipFalse > 0 { + return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse) + } + return fmt.Sprintf("jset %s,%d", operand, skipTrue) + // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips + case JumpBitsNotSet: + return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue) + default: + return fmt.Sprintf("unknown JumpTest %#v", cond) + } +} + +func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string { + if skipTrue > 0 { + if skipFalse > 0 { + return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse) + } + return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue) + } + return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse) +} + +// RetA exits the BPF program, returning the value of register A. +type RetA struct{} + +// Assemble implements the Instruction Assemble method. 
+func (a RetA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetA) String() string { + return fmt.Sprintf("ret a") +} + +// RetConstant exits the BPF program, returning a constant value. +type RetConstant struct { + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a RetConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcConstant, + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetConstant) String() string { + return fmt.Sprintf("ret #%d", a.Val) +} + +// TXA copies the value of register X to register A. +type TXA struct{} + +// Assemble implements the Instruction Assemble method. +func (a TXA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTXA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TXA) String() string { + return fmt.Sprintf("txa") +} + +// TAX copies the value of register A to register X. +type TAX struct{} + +// Assemble implements the Instruction Assemble method. +func (a TAX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTAX, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TAX) String() string { + return fmt.Sprintf("tax") +} + +func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { + var ( + cls uint16 + sz uint16 + ) + switch dst { + case RegA: + cls = opClsLoadA + case RegX: + cls = opClsLoadX + default: + return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) + } + switch loadSize { + case 1: + sz = opLoadWidth1 + case 2: + sz = opLoadWidth2 + case 4: + sz = opLoadWidth4 + default: + return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) + } + return RawInstruction{ + Op: cls | sz | mode, + K: k, + }, nil +} diff --git a/vendor/golang.org/x/net/bpf/setter.go b/vendor/golang.org/x/net/bpf/setter.go new file mode 100644 index 00000000..43e35f0a --- /dev/null +++ b/vendor/golang.org/x/net/bpf/setter.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Setter is a type which can attach a compiled BPF filter to itself. +type Setter interface { + SetBPF(filter []RawInstruction) error +} diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go new file mode 100644 index 00000000..73f57f1f --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm.go @@ -0,0 +1,150 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "errors" + "fmt" +) + +// A VM is an emulated BPF virtual machine. +type VM struct { + filter []Instruction +} + +// NewVM returns a new VM using the input BPF program. 
+func NewVM(filter []Instruction) (*VM, error) { + if len(filter) == 0 { + return nil, errors.New("one or more Instructions must be specified") + } + + for i, ins := range filter { + check := len(filter) - (i + 1) + switch ins := ins.(type) { + // Check for out-of-bounds jumps in instructions + case Jump: + if check <= int(ins.Skip) { + return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) + } + case JumpIf: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + case JumpIfX: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + // Check for division or modulus by zero + case ALUOpConstant: + if ins.Val != 0 { + break + } + + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return nil, errors.New("cannot divide by zero using ALUOpConstant") + } + // Check for unknown extensions + case LoadExtension: + switch ins.Num { + case ExtLen: + default: + return nil, fmt.Errorf("extension %d not implemented", ins.Num) + } + } + } + + // Make sure last instruction is a return instruction + switch filter[len(filter)-1].(type) { + case RetA, RetConstant: + default: + return nil, errors.New("BPF program must end with RetA or RetConstant") + } + + // Though our VM works using disassembled instructions, we + // attempt to assemble the input filter anyway to ensure it is compatible + // with an operating system VM. + _, err := Assemble(filter) + + return &VM{ + filter: filter, + }, err +} + +// Run runs the VM's BPF program against the input bytes. +// Run returns the number of bytes accepted by the BPF program, and any errors +// which occurred while processing the program. 
+func (v *VM) Run(in []byte) (int, error) { + var ( + // Registers of the virtual machine + regA uint32 + regX uint32 + regScratch [16]uint32 + + // OK is true if the program should continue processing the next + // instruction, or false if not, causing the loop to break + ok = true + ) + + // TODO(mdlayher): implement: + // - NegateA: + // - would require a change from uint32 registers to int32 + // registers + + // TODO(mdlayher): add interop tests that check signedness of ALU + // operations against kernel implementation, and make sure Go + // implementation matches behavior + + for i := 0; i < len(v.filter) && ok; i++ { + ins := v.filter[i] + + switch ins := ins.(type) { + case ALUOpConstant: + regA = aluOpConstant(ins, regA) + case ALUOpX: + regA, ok = aluOpX(ins, regA, regX) + case Jump: + i += int(ins.Skip) + case JumpIf: + jump := jumpIf(ins, regA) + i += jump + case JumpIfX: + jump := jumpIfX(ins, regA, regX) + i += jump + case LoadAbsolute: + regA, ok = loadAbsolute(ins, in) + case LoadConstant: + regA, regX = loadConstant(ins, regA, regX) + case LoadExtension: + regA = loadExtension(ins, in) + case LoadIndirect: + regA, ok = loadIndirect(ins, in, regX) + case LoadMemShift: + regX, ok = loadMemShift(ins, in) + case LoadScratch: + regA, regX = loadScratch(ins, regScratch, regA, regX) + case RetA: + return int(regA), nil + case RetConstant: + return int(ins.Val), nil + case StoreScratch: + regScratch = storeScratch(ins, regScratch, regA, regX) + case TAX: + regX = regA + case TXA: + regA = regX + default: + return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) + } + } + + return 0, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go new file mode 100644 index 00000000..cf8947c3 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
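For orientation, the emulator above is driven through the public bpf API: NewVM validates the program (jump bounds, division by zero, a terminating return) and Run executes it against a byte slice. A minimal, self-contained sketch follows; the filter is a hypothetical example, not part of this patch, and accepts input only when its first byte equals 0x01.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	// Load byte 0 into register A, compare it with 0x01, then return
	// either a large accept length or 0 (drop).
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 0, Size: 1},
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x01, SkipFalse: 1},
		bpf.RetConstant{Val: 0xffff},
		bpf.RetConstant{Val: 0},
	})
	if err != nil {
		panic(err)
	}

	n, err := vm.Run([]byte{0x01, 0xde, 0xad})
	fmt.Println(n, err) // 65535 <nil>: RetConstant's value is returned verbatim by this emulator
}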
+ +package bpf + +import ( + "encoding/binary" + "fmt" +) + +func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { + return aluOpCommon(ins.Op, regA, ins.Val) +} + +func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { + // Guard against division or modulus by zero by terminating + // the program, as the OS BPF VM does + if regX == 0 { + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return 0, false + } + } + + return aluOpCommon(ins.Op, regA, regX), true +} + +func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { + switch op { + case ALUOpAdd: + return regA + value + case ALUOpSub: + return regA - value + case ALUOpMul: + return regA * value + case ALUOpDiv: + // Division by zero not permitted by NewVM and aluOpX checks + return regA / value + case ALUOpOr: + return regA | value + case ALUOpAnd: + return regA & value + case ALUOpShiftLeft: + return regA << value + case ALUOpShiftRight: + return regA >> value + case ALUOpMod: + // Modulus by zero not permitted by NewVM and aluOpX checks + return regA % value + case ALUOpXor: + return regA ^ value + default: + return regA + } +} + +func jumpIf(ins JumpIf, regA uint32) int { + return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val) +} + +func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int { + return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX) +} + +func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int { + var ok bool + + switch cond { + case JumpEqual: + ok = regA == value + case JumpNotEqual: + ok = regA != value + case JumpGreaterThan: + ok = regA > value + case JumpLessThan: + ok = regA < value + case JumpGreaterOrEqual: + ok = regA >= value + case JumpLessOrEqual: + ok = regA <= value + case JumpBitsSet: + ok = (regA & value) != 0 + case JumpBitsNotSet: + ok = (regA & value) == 0 + } + + if ok { + return int(skipTrue) + } + + return int(skipFalse) +} + +func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { + offset := int(ins.Off) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = ins.Val + case RegX: + regX = ins.Val + } + + return regA, regX +} + +func loadExtension(ins LoadExtension, in []byte) uint32 { + switch ins.Num { + case ExtLen: + return uint32(len(in)) + default: + panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) + } +} + +func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { + offset := int(ins.Off) + int(regX) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { + offset := int(ins.Off) + + // Size of LoadMemShift is always 1 byte + if !inBounds(len(in), offset, 1) { + return 0, false + } + + // Mask off high 4 bits and multiply low 4 bits by 4 + return uint32(in[offset]&0x0f) * 4, true +} + +func inBounds(inLen int, offset int, size int) bool { + return offset+size <= inLen +} + +func loadCommon(in []byte, offset int, size int) (uint32, bool) { + if !inBounds(len(in), offset, size) { + return 0, false + } + + switch size { + case 1: + return uint32(in[offset]), true + case 2: + return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true + case 4: + return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true + default: + panic(fmt.Sprintf("invalid load size: %d", size)) + } +} + +func loadScratch(ins LoadScratch, regScratch [16]uint32, 
regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = regScratch[ins.N] + case RegX: + regX = regScratch[ins.N] + } + + return regA, regX +} + +func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { + switch ins.Src { + case RegA: + regScratch[ins.N] = regA + case RegX: + regScratch[ins.N] = regX + } + + return regScratch +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 57334dc7..5e01ce9a 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -52,10 +52,11 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + maxQueuedControlFrames = 10000 ) var ( @@ -163,6 +164,15 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +// maxQueuedControlFrames is the maximum number of control frames like +// SETTINGS, PING and RST_STREAM that will be queued for writing before +// the connection is closed to prevent memory exhaustion attacks. +func (s *Server) maxQueuedControlFrames() int { + // TODO: if anybody asks, add a Server field, and remember to define the + // behavior of negative values. + return maxQueuedControlFrames +} + type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -506,6 +516,7 @@ type serverConn struct { sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? + queuedControlFrames int // control frames in the writeSched queue clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client @@ -894,6 +905,14 @@ func (sc *serverConn) serve() { } } + // If the peer is causing us to generate a lot of control frames, + // but not reading them from us, assume they are trying to make us + // run out of memory. + if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + sc.vlogf("http2: too many control frames in send queue, closing connection") + return + } + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY // with no error code (graceful shutdown), don't start the timer until // all open streams have been completed. @@ -1093,6 +1112,14 @@ func (sc *serverConn) writeFrame(wr FrameWriteRequest) { } if !ignoreWrite { + if wr.isControl() { + sc.queuedControlFrames++ + // For extra safety, detect wraparounds, which should not happen, + // and pull the plug. + if sc.queuedControlFrames < 0 { + sc.conn.Close() + } + } sc.writeSched.Push(wr) } sc.scheduleFrameWrite() @@ -1210,10 +1237,8 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // If a frame is already being written, nothing happens. This will be called again // when the frame is done being written. // -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. 
ACKing settings), and then finding the -// highest priority stream. +// If a frame isn't being written and we need to send one, the best frame +// to send is selected by writeSched. // // If a frame isn't being written and there's nothing else to send, we // flush the write buffer. @@ -1241,6 +1266,9 @@ func (sc *serverConn) scheduleFrameWrite() { } if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { if wr, ok := sc.writeSched.Pop(); ok { + if wr.isControl() { + sc.queuedControlFrames-- + } sc.startFrameWrite(wr) continue } @@ -1533,6 +1561,8 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error { if err := f.ForeachSetting(sc.processSetting); err != nil { return err } + // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be + // acknowledged individually, even if multiple are received before the ACK. sc.needToSendSettingsAck = true sc.scheduleFrameWrite() return nil diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index c0c80d89..aeac7d8a 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -992,7 +992,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf req.Method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // See: https://zlib.net/zlib_faq.html#faq39 // // Note that we don't request this for HEAD requests, // due to a bug in nginx: diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index 4fe30730..f24d2b1e 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -32,7 +32,7 @@ type WriteScheduler interface { // Pop dequeues the next frame to write. Returns false if no frames can // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. + // order they are Push'd. No frames should be discarded except by CloseStream. Pop() (wr FrameWriteRequest, ok bool) } @@ -76,6 +76,12 @@ func (wr FrameWriteRequest) StreamID() uint32 { return wr.stream.id } +// isControl reports whether wr is a control frame for MaxQueuedControlFrames +// purposes. That includes non-stream frames and RST_STREAM frames. +func (wr FrameWriteRequest) isControl() bool { + return wr.stream == nil +} + // DataSize returns the number of flow control bytes that must be consumed // to write this entire frame. This is 0 for non-DATA frames. func (wr FrameWriteRequest) DataSize() int { diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go index 36d7919f..9a7b9e58 100644 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -19,7 +19,8 @@ type randomWriteScheduler struct { zero writeQueue // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. + // When a stream is idle, closed, or emptied, it's deleted + // from the map. sq map[uint32]*writeQueue // pool of empty queues for reuse. @@ -63,8 +64,12 @@ func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { return ws.zero.shift(), true } // Iterate over all non-idle streams until finding one that can be consumed. 
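The server.go hunk above adds accounting for queued control frames: writeFrame increments queuedControlFrames for non-stream frames, scheduleFrameWrite decrements it as they are popped, and serve() closes the connection once the count exceeds maxQueuedControlFrames, so a peer that provokes SETTINGS/PING/RST_STREAM writes without ever reading them cannot exhaust memory. A rough, self-contained sketch of that push/pop accounting pattern (the names are illustrative, not the http2 package's API):

package main

import (
	"errors"
	"fmt"
)

const maxQueuedControl = 10000 // mirrors maxQueuedControlFrames above

type frame struct{ control bool }

type conn struct {
	queue         []frame
	queuedControl int
}

// push mirrors writeFrame: control frames are counted as they are queued.
func (c *conn) push(f frame) error {
	if f.control {
		c.queuedControl++
		if c.queuedControl > maxQueuedControl {
			return errors.New("too many queued control frames; closing connection")
		}
	}
	c.queue = append(c.queue, f)
	return nil
}

// pop mirrors scheduleFrameWrite/Pop: the counter shrinks as frames are written out.
func (c *conn) pop() (frame, bool) {
	if len(c.queue) == 0 {
		return frame{}, false
	}
	f := c.queue[0]
	c.queue = c.queue[1:]
	if f.control {
		c.queuedControl--
	}
	return f, true
}

func main() {
	c := &conn{}
	for i := 0; ; i++ {
		if err := c.push(frame{control: true}); err != nil {
			fmt.Println("after", i, "frames:", err)
			return
		}
	}
}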
- for _, q := range ws.sq { + for streamID, q := range ws.sq { if wr, ok := q.consume(math.MaxInt32); ok { + if q.empty() { + delete(ws.sq, streamID) + ws.queuePool.put(q) + } return wr, true } } diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 00000000..cea712fa --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,223 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). +package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2018-05-04 +const ( + DiffServCS0 = 0x00 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEF = 0xb8 // EF + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT + NotECNTransport = 0x00 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x01 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x02 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x03 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2017-10-13 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS = 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 + ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + 
ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. 
+ ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) + +// Address Family Numbers, Updated: 2018-04-02 +const ( + AddrFamilyIPv4 = 1 // IP (IP version 4) + AddrFamilyIPv6 = 2 // IP6 (IP version 6) + AddrFamilyNSAP = 3 // NSAP + AddrFamilyHDLC = 4 // HDLC (8-bit multidrop) + AddrFamilyBBN1822 = 5 // BBN 1822 + AddrFamily802 = 6 // 802 (includes all 802 media plus Ethernet "canonical format") + AddrFamilyE163 = 7 // E.163 + AddrFamilyE164 = 8 // E.164 (SMDS, Frame Relay, ATM) + AddrFamilyF69 = 9 // F.69 (Telex) + AddrFamilyX121 = 10 // X.121 (X.25, Frame Relay) + AddrFamilyIPX = 11 // IPX + AddrFamilyAppletalk = 12 // Appletalk + AddrFamilyDecnetIV = 13 // Decnet IV + AddrFamilyBanyanVines = 14 // Banyan Vines + AddrFamilyE164withSubaddress = 15 // E.164 with NSAP format subaddress + AddrFamilyDNS = 16 // DNS (Domain Name System) + AddrFamilyDistinguishedName = 17 // Distinguished Name + AddrFamilyASNumber = 18 // AS Number + AddrFamilyXTPoverIPv4 = 19 // XTP over IP version 4 + AddrFamilyXTPoverIPv6 = 20 // XTP over IP version 6 + AddrFamilyXTPnativemodeXTP = 21 // XTP native mode XTP + AddrFamilyFibreChannelWorldWidePortName = 22 // Fibre Channel World-Wide Port Name + AddrFamilyFibreChannelWorldWideNodeName = 23 // Fibre Channel World-Wide Node Name + AddrFamilyGWID = 24 // GWID + AddrFamilyL2VPN = 25 // AFI for L2VPN information + AddrFamilyMPLSTPSectionEndpointID = 26 // MPLS-TP Section Endpoint Identifier + AddrFamilyMPLSTPLSPEndpointID = 27 // MPLS-TP LSP Endpoint Identifier + AddrFamilyMPLSTPPseudowireEndpointID = 28 // MPLS-TP Pseudowire Endpoint Identifier + 
AddrFamilyMTIPv4 = 29 // MT IP: Multi-Topology IP version 4 + AddrFamilyMTIPv6 = 30 // MT IPv6: Multi-Topology IP version 6 + AddrFamilyEIGRPCommonServiceFamily = 16384 // EIGRP Common Service Family + AddrFamilyEIGRPIPv4ServiceFamily = 16385 // EIGRP IPv4 Service Family + AddrFamilyEIGRPIPv6ServiceFamily = 16386 // EIGRP IPv6 Service Family + AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF) + AddrFamilyBGPLS = 16388 // BGP-LS + AddrFamily48bitMAC = 16389 // 48-bit MAC + AddrFamily64bitMAC = 16390 // 64-bit MAC + AddrFamilyOUI = 16391 // OUI + AddrFamilyMACFinal24bits = 16392 // MAC/24 + AddrFamilyMACFinal40bits = 16393 // MAC/40 + AddrFamilyIPv6Initial64bits = 16394 // IPv6/64 + AddrFamilyRBridgePortID = 16395 // RBridge Port ID + AddrFamilyTRILLNickname = 16396 // TRILL Nickname +) diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 00000000..0a73e277 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 00000000..14dbb3ad --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 00000000..bac66811 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 00000000..27be0efa --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 00000000..7dedd430 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 00000000..e581011b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/vendor/golang.org/x/net/internal/socket/empty.s b/vendor/golang.org/x/net/internal/socket/empty.s new file mode 100644 index 00000000..bff0231c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/empty.s @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,go1.12 + +// This exists solely so we can linkname in symbols from syscall. diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 00000000..f14872d3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/error_windows.go b/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 00000000..6a6379a8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
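errnoErr in error_unix.go above (and its Windows counterpart just below) exists because converting a syscall.Errno to the error interface boxes the value, which can allocate on every failing call; keeping the most common values in package-level variables boxes them once. A minimal sketch of the same idiom, assuming a Unix-like platform:

package main

import (
	"fmt"
	"syscall"
)

// Boxed once at package init, mirroring errEAGAIN/errEINVAL above.
var errAgain error = syscall.EAGAIN

func errnoErr(errno syscall.Errno) error {
	switch errno {
	case 0:
		return nil
	case syscall.EAGAIN:
		return errAgain // reuse the pre-boxed value
	}
	return errno // uncommon values are boxed on demand
}

func main() {
	fmt.Println(errnoErr(syscall.EAGAIN))
}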
+ +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 00000000..05d6082d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 00000000..dfeda752 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build aix darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 00000000..8d17a40c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 00000000..a746e90e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 00000000..1a7f2792 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !aix,!linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 00000000..f1100683 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 00000000..77f44c1f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 00000000..c5562dd6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
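The mmsghdrs pack/unpack helpers above back the batch Message calls (RecvMsgs/SendMsgs later in socket.go), mapping a slice of Messages onto a single recvmmsg/sendmmsg system call on Linux. Because this package is internal, callers normally reach it through wrappers such as golang.org/x/net/ipv4; a minimal sketch of that public surface, assuming a Linux host and a UDP socket that will actually receive packets:

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)

	// Eight messages, one 1500-byte buffer each; on Linux these become the
	// mmsghdrs packed above and a single recvmmsg call.
	ms := make([]ipv4.Message, 8)
	for i := range ms {
		ms[i].Buffers = [][]byte{make([]byte, 1500)}
	}

	n, err := p.ReadBatch(ms, 0) // blocks until at least one datagram arrives
	if err != nil {
		panic(err)
	}
	for _, m := range ms[:n] {
		fmt.Println(m.N, "bytes from", m.Addr)
	}
}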
+ +// +build aix darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = int32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 00000000..5a38798c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 00000000..a7a5987c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 00000000..e731833a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint64(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 00000000..71a69e25 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 00000000..6465b207 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 00000000..873490a7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 00000000..b07b8900 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. +type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. 
+func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 00000000..1f4cb3b3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,73 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 00000000..a9720118 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,76 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
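NewConn and Option.get/set above follow the standard syscall.RawConn pattern: obtain the RawConn from the net connection's SyscallConn method, then perform the socket call inside Control so the runtime keeps the descriptor alive and stable for the duration of the call. The same pattern with only public packages, reading SO_RCVBUF from a UDP socket on a Unix-like platform (the option and address are just examples):

package main

import (
	"fmt"
	"net"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	rc, err := c.(*net.UDPConn).SyscallConn()
	if err != nil {
		panic(err)
	}

	// Same shape as Option.get above: run getsockopt inside Control so the
	// fd cannot be closed or reused while we hold it.
	var rcvbuf int
	var operr error
	if err := rc.Control(func(fd uintptr) {
		rcvbuf, operr = unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_RCVBUF)
	}); err != nil {
		panic(err)
	}
	if operr != nil {
		panic(os.NewSyscallError("getsockopt", operr))
	}
	fmt.Println("SO_RCVBUF:", rcvbuf)
}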
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 00000000..fe5bb942 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package socket + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errNotImplemented +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 00000000..b8cea6fe --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errNotImplemented +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 00000000..23571b8d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,288 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. +package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +var errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) + +// An Option represents a sticky socket option. 
+type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. +func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message comprises of a header, data and a few padding +// fields to conform to the interface to the kernel. +// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// m. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on m. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on m. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on m. 
+func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on m, and returns +// the next control message. +func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses m as a single or multiple control messages. +// +// Parse works for both standard and compatible messages. +func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. 
+func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. +func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 00000000..ee492ba8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. + NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) &^ (kernelAlign - 1) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 00000000..d432835b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd openbsd + +package socket + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 00000000..b4f41b55 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix freebsd netbsd openbsd + +package socket + +import ( + "runtime" + "unsafe" +) + +func probeProtocolStack() int { + if (runtime.GOOS == "netbsd" || runtime.GOOS == "openbsd") && runtime.GOARCH == "arm" { + return 8 + } + if runtime.GOOS == "aix" { + return 1 + } + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go new file mode 100644 index 00000000..43797d6e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "golang.org/x/sys/unix" + +const ( + sysAF_UNSPEC = unix.AF_UNSPEC + sysAF_INET = unix.AF_INET + sysAF_INET6 = unix.AF_INET6 + + sysSOCK_RAW = unix.SOCK_RAW +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 00000000..b17d223b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 00000000..b17d223b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go new file mode 100644 index 00000000..02d2b3cc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linkname.go b/vendor/golang.org/x/net/internal/socket/sys_linkname.go new file mode 100644 index 00000000..61c3f38a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linkname.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix go1.12,darwin + +package socket + +import ( + "syscall" + "unsafe" +) + +//go:linkname syscall_getsockopt syscall.getsockopt +func syscall_getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *uint32) error + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall_getsockopt(int(s), level, name, unsafe.Pointer(&b[0]), &l) + return int(l), err +} + +//go:linkname syscall_setsockopt syscall.setsockopt +func syscall_setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall_setsockopt(int(s), level, name, unsafe.Pointer(&b[0]), uintptr(len(b))) +} + +//go:linkname syscall_recvmsg syscall.recvmsg +func syscall_recvmsg(s int, msg *syscall.Msghdr, flags int) (n int, err error) + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return syscall_recvmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +} + +//go:linkname syscall_sendmsg syscall.sendmsg +func syscall_sendmsg(s int, msg *syscall.Msghdr, flags int) (n int, err error) + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return syscall_sendmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 00000000..1559521e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 00000000..235b2cc0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.s b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s new file mode 100644 index 00000000..93e7d75e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 00000000..9decee2e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 00000000..d753b436 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 00000000..b6708943 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 00000000..9c0d7401 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 00000000..071a4aba --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 00000000..071a4aba --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 00000000..9c0d7401 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 00000000..21c1e3f0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 00000000..21c1e3f0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go new file mode 100644 index 00000000..64f69f1d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64 + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 00000000..327979ef --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 00000000..06d75628 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 00000000..431851c1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 00000000..22eae809 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, 
errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// interface information. It is used for reducing the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed by first-come +// first-served basis for consistency. +type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +// update refreshes the network interface information if the cache was last +// updated more than 1 minute ago, or if force is set. It returns whether the +// cache was updated. +func (zc *ipv6ZoneCache) update(ift []net.Interface, force bool) (updated bool) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if !force && zc.lastFetched.After(now.Add(-60*time.Second)) { + return false + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return false + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } + return true +} + +func (zc *ipv6ZoneCache) name(zone int) string { + updated := zoneCache.update(nil, false) + zoneCache.RLock() + name, ok := zoneCache.toName[zone] + zoneCache.RUnlock() + if !ok && !updated { + zoneCache.update(nil, true) + zoneCache.RLock() + name, ok = zoneCache.toName[zone] + zoneCache.RUnlock() + } + if !ok { // last resort + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + updated := zoneCache.update(nil, false) + zoneCache.RLock() + index, ok := zoneCache.toIndex[zone] + zoneCache.RUnlock() + if !ok && !updated { + zoneCache.update(nil, true) + zoneCache.RLock() + index, ok = zoneCache.toIndex[zone] + zoneCache.RUnlock() + } + if !ok { // last resort + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 00000000..66b55478 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 00000000..a18ac5ed --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 00000000..0f617426 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,63 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errNotImplemented +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errNotImplemented +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errNotImplemented +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 00000000..0eb71283 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 00000000..d556a446 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = windows.AF_UNSPEC + sysAF_INET = windows.AF_INET + sysAF_INET6 = windows.AF_INET6 + + sysSOCK_RAW = windows.SOCK_RAW +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go new file mode 100644 index 00000000..813385a9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go @@ -0,0 +1,61 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x38 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 00000000..083bda51 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 00000000..55c6c9f5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 00000000..083bda51 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 00000000..55c6c9f5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 00000000..8b7d161d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_dragonfly.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 00000000..3e71ff57 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 00000000..238d90de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 00000000..3e71ff57 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go new file mode 100644 index 00000000..238d90de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 00000000..72d8b254 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 00000000..3545319a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 00000000..72d8b254 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 00000000..3545319a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 00000000..72d8b254 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 00000000..3545319a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 00000000..3545319a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 00000000..72d8b254 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 00000000..3545319a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 00000000..3545319a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go new file mode 100644 index 00000000..dbff234f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go @@ -0,0 +1,59 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_0 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 00000000..3545319a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 00000000..bf8f47c8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,57 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 00000000..a46eff99 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,60 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 00000000..bf8f47c8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,57 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go new file mode 100644 index 00000000..a46eff99 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go @@ -0,0 +1,60 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 00000000..73655a14 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 00000000..0a4de80f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 00000000..73655a14 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go new file mode 100644 index 00000000..0a4de80f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 00000000..353cd5fb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,52 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_solaris.go + +package socket + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 00000000..1a3a4fc0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,194 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. 
+ +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. 
+// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 00000000..a2b02ca9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per packet basis IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn or RawConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn or RawConn allows to send the options + // to the protocol stack. 
+ // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 00000000..19845c55 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) +} + +func parseDst(cm *ControlMessage, b []byte) { + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) +} + +func parseInterface(cm *ControlMessage, b []byte) { + sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 00000000..425338f3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 00000000..a0c049d6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 00000000..b27fa490 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 00000000..82c63064 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,12 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go new file mode 100644 index 00000000..c191c22a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -0,0 +1,264 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. 
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errNotImplemented + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errNotImplemented + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errNotImplemented + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. 
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errNotImplemented + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errNotImplemented + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errNotImplemented + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 00000000..24583497 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. 
+// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. +// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. 
+// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. For example, a UDP listener with port 1024 might +// join two different groups across over two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on IGMPv3 supported +// platform is able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. 
+// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on IGMPv3 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// IGMPv1 or IGMPv2 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 00000000..4a6d7a85 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,186 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram based +// network I/O methods specific to the IPv4 and higher layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage sets the per packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. 
+func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + p := &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + return nil, err + } + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errNotImplemented + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go new file mode 100644 index 00000000..51c12371 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -0,0 +1,55 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +// TOS returns the type-of-service field value for outgoing packets. 
+func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTOS] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTTL] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 00000000..701bd4b2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,173 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers + maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. +type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of h. +// +// The returned slice is in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. 
+func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, errNilHeader + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// Parse parses b as an IPv4 header and stores the result in h. +// +// The provided b must be in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. +func (h *Header) Parse(b []byte) error { + if h == nil || b == nil { + return errNilHeader + } + if len(b) < HeaderLen { + return errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if len(b) < hdrlen { + return errExtHeaderTooShort + } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + case "freebsd": + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +// +// The provided b must be in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. 
+func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 00000000..b494a2cd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,80 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "errors" + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +var ( + errInvalidConn = errors.New("invalid connection") + errMissingAddress = errors.New("missing address") + errMissingHeader = errors.New("missing header") + errNilHeader = errors.New("nil header") + errHeaderTooShort = errors.New("header too short") + errExtHeaderTooShort = errors.New("extension header too short") + errInvalidConnType = errors.New("invalid conn type") + errNoSuchInterface = errors.New("no such interface") + errNoSuchMulticastInterface = errors.New("no such multicast interface") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) + + // See https://www.freebsd.org/doc/en/books/porters-handbook/versions.html. + freebsdVersion uint32 + compatFreeBSD32 bool // 386 emulation on amd64 +) + +// See golang.org/issue/30899. +func adjustFreeBSD32(m *socket.Message) { + // FreeBSD 12.0-RELEASE is affected by https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236737 + if 1200086 <= freebsdVersion && freebsdVersion < 1201000 { + l := (m.NN + 4 - 1) &^ (4 - 1) + if m.NN < l && l <= len(m.OOB) { + m.NN = l + } + } +} + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 00000000..4375b409 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,38 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
+ +package ipv4 + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 +const ( + ICMPTypeEchoReply ICMPType = 0 // Echo Reply + ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable + ICMPTypeRedirect ICMPType = 5 // Redirect + ICMPTypeEcho ICMPType = 8 // Echo + ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement + ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation + ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem + ICMPTypeTimestamp ICMPType = 13 // Timestamp + ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply + ICMPTypePhoturis ICMPType = 40 // Photuris + ICMPTypeExtendedEchoRequest ICMPType = 42 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 43 // Extended Echo Reply +) + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 +var icmpTypes = map[ICMPType]string{ + 0: "echo reply", + 3: "destination unreachable", + 5: "redirect", + 8: "echo", + 9: "router advertisement", + 10: "router solicitation", + 11: "time exceeded", + 12: "parameter problem", + 13: "timestamp", + 14: "timestamp reply", + 40: "photuris", + 42: "extended echo request", + 43: "extended echo reply", +} diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go new file mode 100644 index 00000000..9902bb3d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv4 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model and it works not +// only for IPv6 but IPv4. A node means a device that implements IP. +// A router means a node that forwards IP packets not explicitly +// addressed to itself, and a host means a node that is not a router. +type ICMPFilter struct { + icmpFilter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go new file mode 100644 index 00000000..6e1c5c80 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +func (f *icmpFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 00000000..21bb29ab --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +const sizeofICMPFilter = 0x0 + +type icmpFilter struct { +} + +func (f *icmpFilter) accept(typ ICMPType) { +} + +func (f *icmpFilter) block(typ ICMPType) { +} + +func (f *icmpFilter) setAll(block bool) { +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 00000000..7d784e06 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,117 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + *net.IPConn + *socket.Conn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. 
+func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. +// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = +// Len = +// TOS = +// TotalLen = +// ID = platform sets an appropriate value if ID is zero +// FragOff = +// TTL = +// Protocol = +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return errInvalidConn + } + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 00000000..f95f811a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv4 datagram payload handler. 
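The WriteTo documentation above spells out which Header fields the caller must fill and which the platform completes. A hedged sketch of a raw write following those rules, assuming a *ipv4.RawConn (the exported wrapper around packetHandler, obtained via ipv4.NewRawConn) and the usual net and golang.org/x/net/ipv4 imports, none of which appear in this hunk:

// sendRaw sends one raw IPv4 datagram carrying payload to dst.
// ID, Checksum and Src are left zero so the platform fills them in,
// exactly as the field table above describes.
func sendRaw(rc *ipv4.RawConn, payload []byte, dst net.IP) error {
	h := &ipv4.Header{
		Version:  ipv4.Version,   // 4
		Len:      ipv4.HeaderLen, // 20: no options
		TotalLen: ipv4.HeaderLen + len(payload),
		TTL:      64,
		Protocol: 1, // ICMPv4
		Dst:      dst,
	}
	return rc.WriteTo(h, payload, nil) // nil ControlMessage: no path control requested
}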
+type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 00000000..e7614661 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,84 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 00000000..1116256f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,39 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package ipv4 + +import "net" + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 00000000..22e90c03 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // incbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeIPMreqn + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 00000000..dea64519 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
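The payload ReadFrom/WriteTo pair above is what the package's exported PacketConn forwards to. A sketch of the receive side, assuming ipv4.NewPacketConn, SetControlMessage and the FlagTTL/FlagDst control flags from the package's control-message API (none of which are part of this hunk), plus fmt and golang.org/x/net/ipv4 imports:

// readOne reads a single datagram and reports its IPv4 control message.
func readOne(p *ipv4.PacketConn, buf []byte) error {
	// Ask the kernel to attach TTL and destination-address metadata.
	if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst, true); err != nil {
		return err
	}
	n, cm, src, err := p.ReadFrom(buf)
	if err != nil {
		return err
	}
	// On platforms that fall back to the nocmsg build above, cm is simply nil.
	fmt.Printf("%d bytes from %v, cm=%v\n", n, src, cm)
	return nil
}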
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errNotImplemented + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errNotImplemented + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 00000000..37d4806b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errNotImplemented +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_aix.go b/vendor/golang.org/x/net/ipv4/sys_aix.go new file mode 100644 index 00000000..3d1201e6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_aix.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go new file mode 100644 index 00000000..c5eaafe9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -0,0 +1,119 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 00000000..6dc339ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go new file mode 100644 index 00000000..1f24f69f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err + } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go new file mode 100644 index 00000000..48ef5562 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 00000000..9f30b730 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 00000000..5c986427 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 00000000..58256dd9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 00000000..ac213c73 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,65 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 00000000..859764f3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 00000000..482873d9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + compatFreeBSD32 = true + break + } + } + } +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 00000000..60defe13 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 00000000..832fef1e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 00000000..eeced7f3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,52 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
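The MCAST_JOIN_GROUP / IP_ADD_MEMBERSHIP socket options wired up in the sys_*.go files above back the package's multicast join calls. A sketch of an any-source join, assuming the exported ipv4.NewPacketConn and JoinGroup API (defined outside this hunk); the interface name and the 224.0.0.251 (mDNS) group address are illustrative only:

// joinGroup joins an any-source multicast group on the named interface.
func joinGroup(c *net.UDPConn, ifname string) error {
	ifi, err := net.InterfaceByName(ifname)
	if err != nil {
		return err
	}
	p := ipv4.NewPacketConn(c)
	return p.JoinGroup(ifi, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)})
}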
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 00000000..c0921674 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 00000000..b9c85b33 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 00000000..b0913d53 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. 
+ sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc +) + +type inetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type ipMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type ipMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go new file mode 100644 index 00000000..c741d5c8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go @@ -0,0 +1,33 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x20 + sysIP_RECVTTL = 0x22 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 00000000..e05a251b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 00000000..6d65e9fc --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,31 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_dragonfly.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 00000000..136e2b8f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 00000000..4f730f19 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 00000000..4f730f19 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 00000000..43ef8e59 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,148 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 00000000..ee8204da --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,150 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 00000000..43ef8e59 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,148 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 00000000..ee8204da --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,150 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 00000000..43ef8e59 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,148 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 00000000..ee8204da --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,150 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 00000000..ee8204da --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,150 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 00000000..43ef8e59 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,148 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 00000000..fa1b6bc6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,148 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 00000000..ee8204da --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,150 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 00000000..ee8204da --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,150 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go new file mode 100644 index 00000000..0c0d4801 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go @@ -0,0 +1,151 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 00000000..ee8204da --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,150 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 00000000..8cfc648a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 00000000..37629cb0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 00000000..cb80a308 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,100 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_solaris.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 
[256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 00000000..2ccb9849 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,116 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 00000000..2da64441 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. + // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Marshal returns the binary encoding of cm. 
+func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. 
+type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 00000000..9fd9eb15 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 00000000..8c221b59 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) + } + return m.Next(4) +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 00000000..1d773cbc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 00000000..0971a008 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 00000000..8882d819 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go new file mode 100644 index 00000000..1f422e71 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -0,0 +1,301 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errNotImplemented + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. 
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errNotImplemented + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errNotImplemented + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. 
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, errInvalidConn + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errNotImplemented + } + offset, err = so.GetInt(c.Conn) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. +func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return errNotImplemented + } + if !on { + offset = -1 + } + return so.SetInt(c.Conn, offset) +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errNotImplemented + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errNotImplemented + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. 
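+//
+// A minimal sketch, assuming a PacketConn p; the one-instruction BPF
+// program below simply accepts every packet:
+//
+//	insns, err := bpf.Assemble([]bpf.Instruction{
+//		bpf.RetConstant{Val: 0xffff},
+//	})
+//	if err != nil {
+//		// error handling
+//	}
+//	if err := p.SetBPF(insns); err != nil {
+//		// error handling
+//	}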
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errNotImplemented + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 00000000..e0be9d50 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,243 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 8200. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. +// +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. +// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. 
When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. 
+// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 00000000..f534a0bf --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,127 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errNotImplemented + } + _, mtu, err := so.getMTUInfo(c.Conn) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. 
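+//
+// A minimal sketch of typical use, reusing the names c, p and b from
+// the package documentation examples:
+//
+//	c, err := net.ListenPacket("udp6", "[::]:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c.Close()
+//	p := ipv6.NewPacketConn(c)
+//	if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil {
+//		// error handling
+//	}
+//	b := make([]byte, 1500)
+//	n, rcm, src, err := p.ReadFrom(b)
+//	if err != nil {
+//		// error handling
+//	}
+//	// n bytes from src are now in b; rcm carries the destination address.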
+type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + return &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go new file mode 100644 index 00000000..0326aed6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -0,0 +1,56 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 00000000..e05cb08b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header. +func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 00000000..f767b1f5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,59 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "errors" + "net" + "runtime" +) + +var ( + errInvalidConn = errors.New("invalid connection") + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errNoSuchInterface = errors.New("no such interface") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 00000000..32db1aa9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,86 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
+ +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message + ICMPTypeExtendedEchoRequest ICMPType = 160 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 161 // Extended Echo Reply +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 
140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", + 160: "extended echo request", + 161: "extended echo reply", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 00000000..b7f48e27 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + icmpv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 00000000..b03025cd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 00000000..647f6b44 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 00000000..7c23bb1c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 00000000..370e51ac --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +type icmpv6Filter struct { +} + +func (f *icmpv6Filter) accept(typ ICMPType) { +} + +func (f *icmpv6Filter) block(typ ICMPType) { +} + +func (f *icmpv6Filter) setAll(block bool) { +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new file mode 100644 index 00000000..443cd073 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 00000000..a8197f16 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 00000000..284a0427 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,70 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. 
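+//
+// For example, a sketch reusing the names p, data and dst from the
+// package documentation:
+//
+//	wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1}
+//	if _, err := p.WriteTo(data, &wcm, dst); err != nil {
+//		// error handling
+//	}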
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 00000000..c5a4c967 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package ipv6 + +import "net" + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 00000000..cc3907df --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTrafficClass = iota // header field for unicast packet, RFC 3542 + ssoHopLimit // header field for unicast packet, RFC 3493 + ssoMulticastInterface // outbound interface for multicast packet, RFC 3493 + ssoMulticastHopLimit // header field for multicast packet, RFC 3493 + ssoMulticastLoopback // loopback for multicast packet, RFC 3493 + ssoReceiveTrafficClass // header field on received packet, RFC 3542 + ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542 + ssoReceivePacketInfo // incbound or outbound packet path, RFC 2292 or 3542 + ssoReceivePathMTU // path mtu, RFC 3542 + ssoPathMTU // path mtu, RFC 3542 + ssoChecksum // packet checksum, RFC 2292 or 3542 + ssoICMPFilter // icmp filter, RFC 2292 or 3542 + ssoJoinGroup // any-source multicast, RFC 3493 + ssoLeaveGroup // any-source multicast, RFC 3493 + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go new file mode 100644 index 00000000..824c623c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -0,0 +1,89 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "runtime" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + n, err := so.GetInt(c) + if err != nil { + return nil, err + } + return net.InterfaceByIndex(n) +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + var n int + if ifi != nil { + n = ifi.Index + } + return so.SetInt(c, n) +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPv6Filter { + return nil, errNotImplemented + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter] + return so.Set(c, b) +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, 0, err + } + if n != sizeofIPv6Mtuinfo { + return nil, 0, errNotImplemented + } + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if mi.Addr.Scope_id == 0 || runtime.GOOS == "aix" { + // AIX kernel might return a wrong address. 
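+		// With no usable scope ID there is no interface to look up,
+		// so report only the path MTU and leave the interface nil.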
+ return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errNotImplemented + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 00000000..0a87a93b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errNotImplemented +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + return nil, 0, errNotImplemented +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_aix.go b/vendor/golang.org/x/net/ipv6/sys_aix.go new file mode 100644 index 00000000..bce7091f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_aix.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 00000000..8c3934c3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 00000000..87ae4818 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 00000000..b2dbcb2f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 00000000..eb9f8316 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 00000000..e416eaa1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 00000000..12cc5cb2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,78 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = 
sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go new file mode 100644 index 00000000..85a9f5d0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -0,0 +1,92 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + if runtime.GOOS ==
"freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + compatFreeBSD32 = true + break + } + } + } +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 00000000..bc218103 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: 
ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 00000000..d348b5f6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = 
(*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 00000000..9b52e978 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var compatFreeBSD32 bool // 386 emulation on amd64 + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 00000000..d5bc1108 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 00000000..4f252d09 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 00000000..fc36b018 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sizeofSockaddrInet6 = 0x1c + + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 +) + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go new file mode 100644 index 00000000..bf44e338 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go @@ -0,0 +1,103 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysICMP6_FILTER = 0x26 + + sysIPV6_CHECKSUM = 0x27 + sysIPV6_V6ONLY = 0x25 + + sysIPV6_RTHDRDSTOPTS = 0x37 + + sysIPV6_RECVPKTINFO = 0x23 + sysIPV6_RECVHOPLIMIT = 0x29 + sysIPV6_RECVRTHDR = 0x33 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_RECVDSTOPTS = 0x38 + + sysIPV6_USE_MIN_MTU = 0x2c + sysIPV6_RECVPATHMTU = 0x2f + sysIPV6_PATHMTU = 0x2e + + sysIPV6_PKTINFO = 0x21 + sysIPV6_HOPLIMIT = 0x28 + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x34 + sysIPV6_DSTOPTS = 0x36 + sysIPV6_RTHDR = 0x32 + + sysIPV6_RECVTCLASS = 0x2a + + sysIPV6_TCLASS = 0x2b + sysIPV6_DONTFRAG = 0x2d + + sizeofSockaddrStorage = 0x508 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x510 + sizeofGroupSourceReq = 0xa18 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + X__ss_len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [1265]uint8 + Pad_cgo_0 [7]byte +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 00000000..555744af --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 00000000..cf3cc102 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,88 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_dragonfly.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 00000000..73f31b26 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 00000000..490ce7cf --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 00000000..490ce7cf --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 00000000..14155dec --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,170 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go 
b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 00000000..9566d764 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,172 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data 
[8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 00000000..14155dec --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,170 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface 
uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 00000000..9566d764 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,172 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share 
uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 00000000..14155dec --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,170 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type 
inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 00000000..9566d764 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,172 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type 
kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 00000000..9566d764 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,172 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 
= 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 00000000..14155dec --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,170 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + 
sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 00000000..a51e142b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,170 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go 
b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 00000000..9566d764 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,172 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data 
[8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 00000000..9566d764 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,172 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + 
Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go new file mode 100644 index 00000000..1ee237b2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go @@ -0,0 +1,173 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq 
struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 00000000..9566d764 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,172 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr 
[16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 00000000..e39571e0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 00000000..cc1899a6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 00000000..690eef93 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,131 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_solaris.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go deleted file mode 100644 index 4548b993..00000000 --- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. -//This program must be run after mksyscall.go. 
-package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build go1.12\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) - } -} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index eb433205..00000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. -package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - if goos == "aix" { - // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t - // to avoid having both StTimespec and Timespec. 
- sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) - b = sttimespec.ReplaceAll(b, []byte("Timespec")) - } - - // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) - - // Intentionally export __fds_bits field in FdSet - fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) - b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) - - // If we have empty Ptrace structs, we should delete them. Only s390x emits - // nonempty Ptrace structs. - ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) - b = ptraceRexexp.ReplaceAll(b, nil) - - // Replace the control_regs union with a blank identifier for now. - controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) - b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) - - // Remove fields that are added by glibc - // Note that this is unstable as the identifers are private. - removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Convert [65]int8 to [65]byte in Utsname members to simplify - // conversion to string; see golang.org/issue/20753 - convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) - b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) - - // Convert [1024]int8 to [1024]byte in Ptmget members - convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) - b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) - - // Remove spare fields (e.g. in Statx_t) - spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) - b = spareFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove cgo padding fields - removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) - b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove the first line of warning from cgo - b = b[bytes.IndexByte(b, '\n')+1:] - // Modify the command in the header to include: - // mkpost, our own warning, and a build tag. - replacement := fmt.Sprintf(`$1 | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s,%s`, goarch, goos) - cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) - b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) - - // Rename Stat_t time fields - if goos == "freebsd" && goarch == "386" { - // Hide Stat_t.[AMCB]tim_ext fields - renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) - b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) - } - renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) - b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) - - // gofmt - b, err = format.Source(b) - if err != nil { - log.Fatal(err) - } - - os.Stdout.Write(b) -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go deleted file mode 100644 index e4af9424..00000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if 
err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. - asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - if sysname == "getdirentries64" { - // Special case - libSystem name and - // raw syscall name don't match. - sysname = "__getdirentries64" - } - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. 
- body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index 3be3cdfc..00000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. 
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. 
- var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) - } else { - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - } - - // Assign return values. - body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index c9600995..00000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) - // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) 
(r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) - return -} - -zsyscall_aix_ppc64_ggcgo.go - -// int asyscall(...) - -import "C" - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.asyscall(...)) - e1 = syscall.GetErrno() - return -} -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - // GCCGO - textgccgo := "" - cExtern := "/*\n#include \n" - // GC - textgc := "" - dynimports := "" - linknames := "" - var vars []string - // COMMON - textcommon := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - if sysname == "" { - sysname = funct - } - - onlyCommon := false - if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { - // This function call another syscall which is already implemented. 
- // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. 
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. - var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = 
errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - var callgccgo string - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) - } else { - callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - } - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738..00000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) - n++ - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - nargs := len(args) - - // Determine which form to use; pad args with zeros. - asm := "sysvicall6" - if nonblock != nil { - asm = "rawSysvicall6" - } - if len(args) <= 6 { - for len(args) < 6 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) - os.Exit(1) - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[2] = reg - doErrno = true - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%d != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) - os.Exit(1) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - text += body - - if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "syscall" - "unsafe" -) -%s -%s -%s -var ( -%s -) - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go deleted file mode 100644 index b6b40990..00000000 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. -// -// Build a MIB with each entry being an array containing the level, type and -// a hash that will contain additional entries if the current entry is a node. -// We then walk this MIB and create a flattened sysctl name to OID hash. - -package main - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments. -func cmdLine() string { - return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags. -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -// reMatch performs regular expression match and stores the substring slice to value pointed by m. -func reMatch(re *regexp.Regexp, str string, m *[]string) bool { - *m = re.FindStringSubmatch(str) - if *m != nil { - return true - } - return false -} - -type nodeElement struct { - n int - t string - pE *map[string]nodeElement -} - -var ( - debugEnabled bool - mib map[string]nodeElement - node *map[string]nodeElement - nodeMap map[string]string - sysCtl []string -) - -var ( - ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) - ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) - ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) - netInetRE = regexp.MustCompile(`^netinet/`) - netInet6RE = regexp.MustCompile(`^netinet6/`) - netRE = regexp.MustCompile(`^net/`) - bracesRE = regexp.MustCompile(`{.*}`) - ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) - fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) -) - -func debug(s string) { - if debugEnabled { - fmt.Fprintln(os.Stderr, s) - } -} - -// Walk the MIB and build a sysctl name to OID mapping. -func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { - lNode := pNode // local copy of pointer to node - var keys []string - for k := range *lNode { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, key := range keys { - nodename := name - if name != "" { - nodename += "." 
- } - nodename += key - - nodeoid := append(oid, (*pNode)[key].n) - - if (*pNode)[key].t == `CTLTYPE_NODE` { - if _, ok := nodeMap[nodename]; ok { - lNode = &mib - ctlName := nodeMap[nodename] - for _, part := range strings.Split(ctlName, ".") { - lNode = ((*lNode)[part]).pE - } - } else { - lNode = (*pNode)[key].pE - } - buildSysctl(lNode, nodename, nodeoid) - } else if (*pNode)[key].t != "" { - oidStr := []string{} - for j := range nodeoid { - oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) - } - text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" - sysCtl = append(sysCtl, text) - } - } -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - mib = make(map[string]nodeElement) - headers := [...]string{ - `sys/sysctl.h`, - `sys/socket.h`, - `sys/tty.h`, - `sys/malloc.h`, - `sys/mount.h`, - `sys/namei.h`, - `sys/sem.h`, - `sys/shm.h`, - `sys/vmmeter.h`, - `uvm/uvmexp.h`, - `uvm/uvm_param.h`, - `uvm/uvm_swap_encrypt.h`, - `ddb/db_var.h`, - `net/if.h`, - `net/if_pfsync.h`, - `net/pipex.h`, - `netinet/in.h`, - `netinet/icmp_var.h`, - `netinet/igmp_var.h`, - `netinet/ip_ah.h`, - `netinet/ip_carp.h`, - `netinet/ip_divert.h`, - `netinet/ip_esp.h`, - `netinet/ip_ether.h`, - `netinet/ip_gre.h`, - `netinet/ip_ipcomp.h`, - `netinet/ip_ipip.h`, - `netinet/pim_var.h`, - `netinet/tcp_var.h`, - `netinet/udp_var.h`, - `netinet6/in6.h`, - `netinet6/ip6_divert.h`, - `netinet6/pim6_var.h`, - `netinet/icmp6.h`, - `netmpls/mpls.h`, - } - - ctls := [...]string{ - `kern`, - `vm`, - `fs`, - `net`, - //debug /* Special handling required */ - `hw`, - //machdep /* Arch specific */ - `user`, - `ddb`, - //vfs /* Special handling required */ - `fs.posix`, - `kern.forkstat`, - `kern.intrcnt`, - `kern.malloc`, - `kern.nchstats`, - `kern.seminfo`, - `kern.shminfo`, - `kern.timecounter`, - `kern.tty`, - `kern.watchdog`, - `net.bpf`, - `net.ifq`, - `net.inet`, - `net.inet.ah`, - `net.inet.carp`, - `net.inet.divert`, - `net.inet.esp`, - `net.inet.etherip`, - `net.inet.gre`, - `net.inet.icmp`, - `net.inet.igmp`, - `net.inet.ip`, - `net.inet.ip.ifq`, - `net.inet.ipcomp`, - `net.inet.ipip`, - `net.inet.mobileip`, - `net.inet.pfsync`, - `net.inet.pim`, - `net.inet.tcp`, - `net.inet.udp`, - `net.inet6`, - `net.inet6.divert`, - `net.inet6.ip6`, - `net.inet6.icmp6`, - `net.inet6.pim6`, - `net.inet6.tcp6`, - `net.inet6.udp6`, - `net.mpls`, - `net.mpls.ifq`, - `net.key`, - `net.pflow`, - `net.pfsync`, - `net.pipex`, - `net.rt`, - `vm.swapencrypt`, - //vfsgenctl /* Special handling required */ - } - - // Node name "fixups" - ctlMap := map[string]string{ - "ipproto": "net.inet", - "net.inet.ipproto": "net.inet", - "net.inet6.ipv6proto": "net.inet6", - "net.inet6.ipv6": "net.inet6.ip6", - "net.inet.icmpv6": "net.inet6.icmp6", - "net.inet6.divert6": "net.inet6.divert", - "net.inet6.tcp6": "net.inet.tcp", - "net.inet6.udp6": "net.inet.udp", - "mpls": "net.mpls", - "swpenc": "vm.swapencrypt", - } - - // Node mappings - nodeMap = map[string]string{ - "net.inet.ip.ifq": "net.ifq", - "net.inet.pfsync": "net.pfsync", - "net.mpls.ifq": "net.ifq", - } - - mCtls := 
make(map[string]bool) - for _, ctl := range ctls { - mCtls[ctl] = true - } - - for _, header := range headers { - debug("Processing " + header) - file, err := os.Open(filepath.Join("/usr/include", header)) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - var sub []string - if reMatch(ctlNames1RE, s.Text(), &sub) || - reMatch(ctlNames2RE, s.Text(), &sub) || - reMatch(ctlNames3RE, s.Text(), &sub) { - if sub[1] == `CTL_NAMES` { - // Top level. - node = &mib - } else { - // Node. - nodename := strings.ToLower(sub[2]) - ctlName := "" - if reMatch(netInetRE, header, &sub) { - ctlName = "net.inet." + nodename - } else if reMatch(netInet6RE, header, &sub) { - ctlName = "net.inet6." + nodename - } else if reMatch(netRE, header, &sub) { - ctlName = "net." + nodename - } else { - ctlName = nodename - ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) - } - - if val, ok := ctlMap[ctlName]; ok { - ctlName = val - } - if _, ok := mCtls[ctlName]; !ok { - debug("Ignoring " + ctlName + "...") - continue - } - - // Walk down from the top of the MIB. - node = &mib - for _, part := range strings.Split(ctlName, ".") { - if _, ok := (*node)[part]; !ok { - debug("Missing node " + part) - (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} - } - node = (*node)[part].pE - } - } - - // Populate current node with entries. - i := -1 - for !strings.HasPrefix(s.Text(), "}") { - s.Scan() - if reMatch(bracesRE, s.Text(), &sub) { - i++ - } - if !reMatch(ctlTypeRE, s.Text(), &sub) { - continue - } - (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} - } - } - } - err = s.Err() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - file.Close() - } - buildSysctl(&mib, "", []int{}) - - sort.Strings(sysCtl) - text := strings.Join(sysCtl, "") - - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; DO NOT EDIT. - -// +build %s - -package unix - -type mibentry struct { - ctlname string - ctloid []_C_int -} - -var sysctlMib = []mibentry { -%s -} -` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index baa6ecd8..00000000 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). 
-package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { 
- name = t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 40d2beed..00000000 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - 
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e69..00000000 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - 
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79..00000000 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr 
C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index a121dc33..00000000 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_ATTACH = C.PT_ATTACH - PTRACE_CONT = C.PT_CONTINUE - PTRACE_DETACH = C.PT_DETACH - PTRACE_GETFPREGS = C.PT_GETFPREGS - PTRACE_GETFSBASE = C.PT_GETFSBASE - PTRACE_GETLWPLIST = C.PT_GETLWPLIST - PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS - PTRACE_GETREGS = C.PT_GETREGS - PTRACE_GETXSTATE = C.PT_GETXSTATE - PTRACE_IO = C.PT_IO - PTRACE_KILL = C.PT_KILL - PTRACE_LWPEVENTS = C.PT_LWP_EVENTS - PTRACE_LWPINFO = C.PT_LWPINFO - PTRACE_SETFPREGS = C.PT_SETFPREGS - PTRACE_SETREGS = C.PT_SETREGS - PTRACE_SINGLESTEP = C.PT_STEP - PTRACE_TRACEME = C.PT_TRACE_ME -) - -const ( - PIOD_READ_D = C.PIOD_READ_D - PIOD_WRITE_D = C.PIOD_WRITE_D - PIOD_READ_I = C.PIOD_READ_I - PIOD_WRITE_I = C.PIOD_WRITE_I -) - -const ( - PL_FLAG_BORN = C.PL_FLAG_BORN - PL_FLAG_EXITED = C.PL_FLAG_EXITED - 
PL_FLAG_SI = C.PL_FLAG_SI -) - -const ( - TRAP_BRKPT = C.TRAP_BRKPT - TRAP_TRACE = C.TRAP_TRACE -) - -type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo - -type __Siginfo C.struct___siginfo - -type Sigset_t C.sigset_t - -type Reg C.struct_reg - -type FpReg C.struct_fpreg - -type PtraceIoDesc C.struct_ptrace_io_desc - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data - -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 4a96d72c..00000000 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = 
C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 775cb57d..00000000 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - 
-type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f93..00000000 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type 
IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go index fe47b9b3..520b9ada 100644 --- a/vendor/golang.org/x/text/transform/transform.go +++ b/vendor/golang.org/x/text/transform/transform.go @@ -78,8 +78,8 @@ type SpanningTransformer interface { // considering the error err. // // A nil error means that all input bytes are known to be identical to the - // output produced by the Transformer. A nil error can be be returned - // regardless of whether atEOF is true. If err is nil, then then n must + // output produced by the Transformer. A nil error can be returned + // regardless of whether atEOF is true. If err is nil, then n must // equal len(src); the converse is not necessarily true. // // ErrEndOfSpan means that the Transformer output may differ from the @@ -493,7 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro return dstL.n, srcL.p, err } -// Deprecated: use runes.Remove instead. +// Deprecated: Use runes.Remove instead. func RemoveFunc(f func(r rune) bool) Transformer { return removeF(f) } diff --git a/vendor/golang.org/x/text/unicode/bidi/bidi.go b/vendor/golang.org/x/text/unicode/bidi/bidi.go index 3fc4a625..e8edc54c 100644 --- a/vendor/golang.org/x/text/unicode/bidi/bidi.go +++ b/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -6,7 +6,7 @@ // Package bidi contains functionality for bidirectional text support. // -// See http://www.unicode.org/reports/tr9. +// See https://www.unicode.org/reports/tr9. // // NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways // and without notice. diff --git a/vendor/golang.org/x/text/unicode/bidi/bracket.go b/vendor/golang.org/x/text/unicode/bidi/bracket.go index 601e2592..18539397 100644 --- a/vendor/golang.org/x/text/unicode/bidi/bracket.go +++ b/vendor/golang.org/x/text/unicode/bidi/bracket.go @@ -12,7 +12,7 @@ import ( // This file contains a port of the reference implementation of the // Bidi Parentheses Algorithm: -// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java +// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java // // The implementation in this file covers definitions BD14-BD16 and rule N0 // of UAX#9. @@ -246,7 +246,7 @@ func (p *bracketPairer) getStrongTypeN0(index int) Class { // assuming the given embedding direction. // // It returns ON if no strong type is found. If a single strong type is found, -// it returns this this type. 
Otherwise it returns the embedding direction. +// it returns this type. Otherwise it returns the embedding direction. // // TODO: use separate type for "strong" directionality. func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class { diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index d4c1399f..48d14400 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -7,7 +7,7 @@ package bidi import "log" // This implementation is a port based on the reference implementation found at: -// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ +// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ // // described in Unicode Bidirectional Algorithm (UAX #9). // diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 4e1c7ba0..00000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - "PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteVersionedGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. 
- - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. -var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 51bd68fa..00000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. - 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20Cf, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb99428..00000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. -type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 2e1ff195..d8c94e1b 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.10 +// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go new file mode 100644 index 00000000..022e3c69 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -0,0 +1,1887 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 16512 bytes (16.12 KiB). Checksum: 2a9cf1317f2ffaa. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 234 blocks, 14976 entries, 14976 bytes +// The third block is the zero block. +var bidiValues = [14976]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 
0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 
0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 
0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 
0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 
0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 
0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 
0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 
0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 
0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 
0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 
0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, + 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002, + 0x174c: 0x0002, 
0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002, + 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002, + 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002, + // Block 0x5e, offset 0x1780 + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a, + 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 0x000a, 0x17d0: 0x000a, 0x17d1: 0x000a, + 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a, + 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a, + 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a, + 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a, + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a, + 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a, + 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a, + 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 
0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a, + 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a, + 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba, + 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a, + 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a, + 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 
0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1996: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19ef: 0x000c, + 0x19f0: 0x000c, 0x19f1: 0x000c, + 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c, + 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c, + 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c, + 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c, + 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 0x1a7a: 0x000c, 0x1a7b: 0x000c, + 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a, + 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a, + 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a, + 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 
0x002a, + 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a, + 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a, + 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a, + 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a, + 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a, + 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a, + 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, + 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a, + 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a, + 
0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a, + 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a, + 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a, + 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c, + 0x1bf0: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, + 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a, + 0x1c20: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c7b: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a, + 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a, + 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a, + 0x1c92: 0x000a, 0x1c93: 0x000a, 0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a, + 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a, + 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cdd: 0x000a, + 0x1cde: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d10: 0x000a, 0x1d11: 0x000a, + 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a, + 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 0x1d1c: 0x000a, 0x1d1d: 0x000a, + 0x1d1e: 0x000a, 0x1d1f: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a, + 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e1e: 0x000a, 0x1e1f: 0x000a, + 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e50: 0x000a, 0x1e51: 0x000a, + 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a, + 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a, + 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a, + 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a, + 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a, + 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a, + 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a, + 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a, + 0x1e86: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f2f: 0x000c, + 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c, + 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c, + 0x1f3c: 0x000c, 0x1f3d: 0x000c, 0x1f3e: 0x000a, 0x1f3f: 0x000a, + // Block 0x7d, offset 0x1f40 + 
0x1f5e: 0x000c, 0x1f5f: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1fb0: 0x000c, 0x1fb1: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a, + 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a, + 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a, + 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a, + 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a, + 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a, + // Block 0x80, offset 0x2000 + 0x2008: 0x000a, + // Block 0x81, offset 0x2040 + 0x2042: 0x000c, + 0x2046: 0x000c, 0x204b: 0x000c, + 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a, + 0x206a: 0x000a, 0x206b: 0x000a, + 0x2078: 0x0004, 0x2079: 0x0004, + // Block 0x82, offset 0x2080 + 0x20b4: 0x000a, 0x20b5: 0x000a, + 0x20b6: 0x000a, 0x20b7: 0x000a, + // Block 0x83, offset 0x20c0 + 0x20c4: 0x000c, 0x20c5: 0x000c, + 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c, + 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c, + 0x20f0: 0x000c, 0x20f1: 0x000c, + 0x20ff: 0x000c, + // Block 0x84, offset 0x2100 + 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, + // Block 0x85, offset 0x2140 + 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c, + 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c, + // Block 0x86, offset 0x2180 + 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c, + 0x21b3: 0x000c, + 0x21b6: 0x000c, 0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c, + 0x21bc: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e5: 0x000c, + // Block 0x88, offset 0x2200 + 0x2229: 0x000c, + 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c, + 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c, + 0x2236: 0x000c, + // Block 0x89, offset 0x2240 + 0x2243: 0x000c, + 0x224c: 0x000c, + 0x227c: 0x000c, + // Block 0x8a, offset 0x2280 + 0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c, + 0x22b7: 0x000c, 0x22b8: 0x000c, + 0x22be: 0x000c, 0x22bf: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22c1: 0x000c, + 0x22ec: 0x000c, 0x22ed: 0x000c, + 0x22f6: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2325: 0x000c, 0x2328: 0x000c, + 0x232d: 0x000c, + // Block 0x8d, offset 0x2340 + 0x235d: 0x0001, + 0x235e: 0x000c, 0x235f: 0x0001, 0x2360: 0x0001, 0x2361: 0x0001, 0x2362: 0x0001, 0x2363: 0x0001, + 0x2364: 0x0001, 0x2365: 0x0001, 0x2366: 0x0001, 0x2367: 0x0001, 0x2368: 0x0001, 0x2369: 0x0003, + 0x236a: 0x0001, 0x236b: 0x0001, 0x236c: 0x0001, 0x236d: 0x0001, 0x236e: 0x0001, 0x236f: 0x0001, + 0x2370: 0x0001, 0x2371: 0x0001, 0x2372: 0x0001, 0x2373: 0x0001, 0x2374: 0x0001, 0x2375: 0x0001, + 0x2376: 0x0001, 0x2377: 0x0001, 0x2378: 0x0001, 0x2379: 0x0001, 0x237a: 0x0001, 0x237b: 0x0001, + 0x237c: 0x0001, 0x237d: 0x0001, 0x237e: 0x0001, 0x237f: 0x0001, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0001, 0x2381: 0x0001, 0x2382: 0x0001, 0x2383: 0x0001, 0x2384: 0x0001, 0x2385: 0x0001, + 0x2386: 0x0001, 0x2387: 0x0001, 0x2388: 0x0001, 0x2389: 0x0001, 0x238a: 0x0001, 0x238b: 0x0001, + 0x238c: 0x0001, 0x238d: 0x0001, 0x238e: 0x0001, 
0x238f: 0x0001, 0x2390: 0x000d, 0x2391: 0x000d, + 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d, + 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d, + 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d, + 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d, + 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d, + 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d, + 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d, + 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000d, 0x23bf: 0x000d, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x000d, 0x23c1: 0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d, + 0x23c6: 0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d, + 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000a, 0x23ff: 0x000a, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000b, 0x2411: 0x000b, + 0x2412: 0x000b, 0x2413: 0x000b, 0x2414: 0x000b, 0x2415: 0x000b, 0x2416: 0x000b, 0x2417: 0x000b, + 0x2418: 0x000b, 0x2419: 0x000b, 0x241a: 0x000b, 0x241b: 0x000b, 0x241c: 0x000b, 0x241d: 0x000b, + 0x241e: 0x000b, 0x241f: 0x000b, 0x2420: 0x000b, 0x2421: 0x000b, 0x2422: 0x000b, 0x2423: 0x000b, + 0x2424: 0x000b, 0x2425: 0x000b, 0x2426: 0x000b, 0x2427: 0x000b, 0x2428: 0x000b, 0x2429: 0x000b, + 0x242a: 0x000b, 0x242b: 0x000b, 0x242c: 0x000b, 0x242d: 0x000b, 0x242e: 0x000b, 0x242f: 0x000b, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000a, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000c, 0x2441: 0x000c, 0x2442: 0x000c, 0x2443: 0x000c, 0x2444: 0x000c, 0x2445: 0x000c, + 0x2446: 0x000c, 0x2447: 0x000c, 0x2448: 0x000c, 0x2449: 0x000c, 0x244a: 0x000c, 0x244b: 0x000c, + 0x244c: 0x000c, 0x244d: 0x000c, 0x244e: 0x000c, 0x244f: 0x000c, 0x2450: 0x000a, 0x2451: 0x000a, + 0x2452: 0x000a, 0x2453: 0x000a, 0x2454: 0x000a, 0x2455: 0x000a, 0x2456: 0x000a, 0x2457: 0x000a, + 0x2458: 0x000a, 0x2459: 0x000a, + 0x2460: 0x000c, 0x2461: 0x000c, 0x2462: 0x000c, 0x2463: 0x000c, + 0x2464: 0x000c, 0x2465: 0x000c, 0x2466: 0x000c, 0x2467: 0x000c, 0x2468: 
0x000c, 0x2469: 0x000c, + 0x246a: 0x000c, 0x246b: 0x000c, 0x246c: 0x000c, 0x246d: 0x000c, 0x246e: 0x000c, 0x246f: 0x000c, + 0x2470: 0x000a, 0x2471: 0x000a, 0x2472: 0x000a, 0x2473: 0x000a, 0x2474: 0x000a, 0x2475: 0x000a, + 0x2476: 0x000a, 0x2477: 0x000a, 0x2478: 0x000a, 0x2479: 0x000a, 0x247a: 0x000a, 0x247b: 0x000a, + 0x247c: 0x000a, 0x247d: 0x000a, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000a, 0x2481: 0x000a, 0x2482: 0x000a, 0x2483: 0x000a, 0x2484: 0x000a, 0x2485: 0x000a, + 0x2486: 0x000a, 0x2487: 0x000a, 0x2488: 0x000a, 0x2489: 0x000a, 0x248a: 0x000a, 0x248b: 0x000a, + 0x248c: 0x000a, 0x248d: 0x000a, 0x248e: 0x000a, 0x248f: 0x000a, 0x2490: 0x0006, 0x2491: 0x000a, + 0x2492: 0x0006, 0x2494: 0x000a, 0x2495: 0x0006, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x009a, 0x249a: 0x008a, 0x249b: 0x007a, 0x249c: 0x006a, 0x249d: 0x009a, + 0x249e: 0x008a, 0x249f: 0x0004, 0x24a0: 0x000a, 0x24a1: 0x000a, 0x24a2: 0x0003, 0x24a3: 0x0003, + 0x24a4: 0x000a, 0x24a5: 0x000a, 0x24a6: 0x000a, 0x24a8: 0x000a, 0x24a9: 0x0004, + 0x24aa: 0x0004, 0x24ab: 0x000a, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000d, 0x24c1: 0x000d, 0x24c2: 0x000d, 0x24c3: 0x000d, 0x24c4: 0x000d, 0x24c5: 0x000d, + 0x24c6: 0x000d, 0x24c7: 0x000d, 0x24c8: 0x000d, 0x24c9: 0x000d, 0x24ca: 0x000d, 0x24cb: 0x000d, + 0x24cc: 0x000d, 0x24cd: 0x000d, 0x24ce: 0x000d, 0x24cf: 0x000d, 0x24d0: 0x000d, 0x24d1: 0x000d, + 0x24d2: 0x000d, 0x24d3: 0x000d, 0x24d4: 0x000d, 0x24d5: 0x000d, 0x24d6: 0x000d, 0x24d7: 0x000d, + 0x24d8: 0x000d, 0x24d9: 0x000d, 0x24da: 0x000d, 0x24db: 0x000d, 0x24dc: 0x000d, 0x24dd: 0x000d, + 0x24de: 0x000d, 0x24df: 0x000d, 0x24e0: 0x000d, 0x24e1: 0x000d, 0x24e2: 0x000d, 0x24e3: 0x000d, + 0x24e4: 0x000d, 0x24e5: 0x000d, 0x24e6: 0x000d, 0x24e7: 0x000d, 0x24e8: 0x000d, 0x24e9: 0x000d, + 0x24ea: 0x000d, 0x24eb: 0x000d, 0x24ec: 0x000d, 0x24ed: 0x000d, 0x24ee: 0x000d, 0x24ef: 0x000d, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000b, + // Block 0x94, offset 0x2500 + 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x0004, 0x2504: 0x0004, 0x2505: 0x0004, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x003a, 0x2509: 0x002a, 0x250a: 0x000a, 0x250b: 0x0003, + 0x250c: 0x0006, 0x250d: 0x0003, 0x250e: 0x0006, 0x250f: 0x0006, 0x2510: 0x0002, 0x2511: 0x0002, + 0x2512: 0x0002, 0x2513: 0x0002, 0x2514: 0x0002, 0x2515: 0x0002, 0x2516: 0x0002, 0x2517: 0x0002, + 0x2518: 0x0002, 0x2519: 0x0002, 0x251a: 0x0006, 0x251b: 0x000a, 0x251c: 0x000a, 0x251d: 0x000a, + 0x251e: 0x000a, 0x251f: 0x000a, 0x2520: 0x000a, + 0x253b: 0x005a, + 0x253c: 0x000a, 0x253d: 0x004a, 0x253e: 0x000a, 0x253f: 0x000a, + // Block 0x95, offset 0x2540 + 0x2540: 0x000a, + 0x255b: 0x005a, 0x255c: 0x000a, 0x255d: 0x004a, + 0x255e: 0x000a, 0x255f: 0x00fa, 0x2560: 0x00ea, 0x2561: 0x000a, 0x2562: 0x003a, 0x2563: 0x002a, + 0x2564: 0x000a, 0x2565: 0x000a, + // Block 0x96, offset 0x2580 + 0x25a0: 0x0004, 0x25a1: 0x0004, 0x25a2: 0x000a, 0x25a3: 0x000a, + 0x25a4: 0x000a, 0x25a5: 0x0004, 0x25a6: 0x0004, 0x25a8: 0x000a, 0x25a9: 0x000a, + 0x25aa: 0x000a, 0x25ab: 0x000a, 0x25ac: 0x000a, 0x25ad: 
0x000a, 0x25ae: 0x000a, + 0x25b0: 0x000b, 0x25b1: 0x000b, 0x25b2: 0x000b, 0x25b3: 0x000b, 0x25b4: 0x000b, 0x25b5: 0x000b, + 0x25b6: 0x000b, 0x25b7: 0x000b, 0x25b8: 0x000b, 0x25b9: 0x000a, 0x25ba: 0x000a, 0x25bb: 0x000a, + 0x25bc: 0x000a, 0x25bd: 0x000a, 0x25be: 0x000b, 0x25bf: 0x000b, + // Block 0x97, offset 0x25c0 + 0x25c1: 0x000a, + // Block 0x98, offset 0x2600 + 0x2600: 0x000a, 0x2601: 0x000a, 0x2602: 0x000a, 0x2603: 0x000a, 0x2604: 0x000a, 0x2605: 0x000a, + 0x2606: 0x000a, 0x2607: 0x000a, 0x2608: 0x000a, 0x2609: 0x000a, 0x260a: 0x000a, 0x260b: 0x000a, + 0x260c: 0x000a, 0x2610: 0x000a, 0x2611: 0x000a, + 0x2612: 0x000a, 0x2613: 0x000a, 0x2614: 0x000a, 0x2615: 0x000a, 0x2616: 0x000a, 0x2617: 0x000a, + 0x2618: 0x000a, 0x2619: 0x000a, 0x261a: 0x000a, 0x261b: 0x000a, + 0x2620: 0x000a, + // Block 0x99, offset 0x2640 + 0x267d: 0x000c, + // Block 0x9a, offset 0x2680 + 0x26a0: 0x000c, 0x26a1: 0x0002, 0x26a2: 0x0002, 0x26a3: 0x0002, + 0x26a4: 0x0002, 0x26a5: 0x0002, 0x26a6: 0x0002, 0x26a7: 0x0002, 0x26a8: 0x0002, 0x26a9: 0x0002, + 0x26aa: 0x0002, 0x26ab: 0x0002, 0x26ac: 0x0002, 0x26ad: 0x0002, 0x26ae: 0x0002, 0x26af: 0x0002, + 0x26b0: 0x0002, 0x26b1: 0x0002, 0x26b2: 0x0002, 0x26b3: 0x0002, 0x26b4: 0x0002, 0x26b5: 0x0002, + 0x26b6: 0x0002, 0x26b7: 0x0002, 0x26b8: 0x0002, 0x26b9: 0x0002, 0x26ba: 0x0002, 0x26bb: 0x0002, + // Block 0x9b, offset 0x26c0 + 0x26f6: 0x000c, 0x26f7: 0x000c, 0x26f8: 0x000c, 0x26f9: 0x000c, 0x26fa: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001, + 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001, + 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001, + 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001, + 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001, + 0x271e: 0x0001, 0x271f: 0x0001, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001, + 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001, + 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001, + 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001, + 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001, + 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x000a, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 
0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x000c, 0x2782: 0x000c, 0x2783: 0x000c, 0x2784: 0x0001, 0x2785: 0x000c, + 0x2786: 0x000c, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x000c, 0x278d: 0x000c, 0x278e: 0x000c, 0x278f: 0x000c, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x000c, 0x27b9: 0x000c, 0x27ba: 0x000c, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x000c, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x000c, 0x27e6: 0x000c, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x000a, 0x283a: 0x000a, 0x283b: 0x000a, + 0x283c: 0x000a, 0x283d: 0x000a, 0x283e: 0x000a, 0x283f: 0x000a, + // Block 0xa1, offset 0x2840 + 0x2840: 0x000d, 0x2841: 0x000d, 0x2842: 0x000d, 0x2843: 0x000d, 0x2844: 0x000d, 0x2845: 0x000d, + 0x2846: 0x000d, 0x2847: 0x000d, 0x2848: 0x000d, 0x2849: 0x000d, 0x284a: 0x000d, 0x284b: 0x000d, + 0x284c: 0x000d, 0x284d: 0x000d, 0x284e: 
0x000d, 0x284f: 0x000d, 0x2850: 0x000d, 0x2851: 0x000d, + 0x2852: 0x000d, 0x2853: 0x000d, 0x2854: 0x000d, 0x2855: 0x000d, 0x2856: 0x000d, 0x2857: 0x000d, + 0x2858: 0x000d, 0x2859: 0x000d, 0x285a: 0x000d, 0x285b: 0x000d, 0x285c: 0x000d, 0x285d: 0x000d, + 0x285e: 0x000d, 0x285f: 0x000d, 0x2860: 0x000d, 0x2861: 0x000d, 0x2862: 0x000d, 0x2863: 0x000d, + 0x2864: 0x000c, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x000c, 0x2868: 0x000d, 0x2869: 0x000d, + 0x286a: 0x000d, 0x286b: 0x000d, 0x286c: 0x000d, 0x286d: 0x000d, 0x286e: 0x000d, 0x286f: 0x000d, + 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005, + 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x000d, 0x287b: 0x000d, + 0x287c: 0x000d, 0x287d: 0x000d, 0x287e: 0x000d, 0x287f: 0x000d, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, + 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, + 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, + 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, + 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, + 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x0001, 0x28c1: 0x0001, 0x28c2: 0x0001, 0x28c3: 0x0001, 0x28c4: 0x0001, 0x28c5: 0x0001, + 0x28c6: 0x0001, 0x28c7: 0x0001, 0x28c8: 0x0001, 0x28c9: 0x0001, 0x28ca: 0x0001, 0x28cb: 0x0001, + 0x28cc: 0x0001, 0x28cd: 0x0001, 0x28ce: 0x0001, 0x28cf: 0x0001, 0x28d0: 0x0001, 0x28d1: 0x0001, + 0x28d2: 0x0001, 0x28d3: 0x0001, 0x28d4: 0x0001, 0x28d5: 0x0001, 0x28d6: 0x0001, 0x28d7: 0x0001, + 0x28d8: 0x0001, 0x28d9: 0x0001, 0x28da: 0x0001, 0x28db: 0x0001, 0x28dc: 0x0001, 0x28dd: 0x0001, + 0x28de: 0x0001, 0x28df: 0x0001, 0x28e0: 0x0001, 0x28e1: 0x0001, 0x28e2: 0x0001, 0x28e3: 0x0001, + 0x28e4: 0x0001, 0x28e5: 0x0001, 0x28e6: 0x0001, 0x28e7: 0x0001, 0x28e8: 0x0001, 0x28e9: 0x0001, + 0x28ea: 0x0001, 0x28eb: 0x0001, 0x28ec: 0x0001, 0x28ed: 0x0001, 0x28ee: 0x0001, 0x28ef: 0x0001, + 0x28f0: 0x000d, 0x28f1: 0x000d, 0x28f2: 0x000d, 0x28f3: 0x000d, 0x28f4: 0x000d, 0x28f5: 0x000d, + 0x28f6: 0x000d, 0x28f7: 0x000d, 0x28f8: 0x000d, 0x28f9: 0x000d, 0x28fa: 0x000d, 0x28fb: 0x000d, + 0x28fc: 0x000d, 0x28fd: 0x000d, 0x28fe: 0x000d, 0x28ff: 0x000d, + // Block 0xa4, offset 0x2900 + 0x2900: 0x000d, 0x2901: 0x000d, 0x2902: 0x000d, 0x2903: 0x000d, 0x2904: 0x000d, 0x2905: 0x000d, + 0x2906: 0x000c, 0x2907: 0x000c, 0x2908: 0x000c, 0x2909: 0x000c, 0x290a: 0x000c, 0x290b: 0x000c, + 0x290c: 0x000c, 0x290d: 0x000c, 0x290e: 0x000c, 0x290f: 0x000c, 0x2910: 0x000c, 0x2911: 0x000d, + 0x2912: 0x000d, 0x2913: 0x000d, 0x2914: 0x000d, 0x2915: 0x000d, 0x2916: 0x000d, 0x2917: 0x000d, + 0x2918: 0x000d, 0x2919: 0x000d, 0x291a: 0x000d, 0x291b: 0x000d, 0x291c: 0x000d, 0x291d: 0x000d, + 0x291e: 0x000d, 0x291f: 0x000d, 0x2920: 0x000d, 0x2921: 0x000d, 
0x2922: 0x000d, 0x2923: 0x000d, + 0x2924: 0x000d, 0x2925: 0x000d, 0x2926: 0x000d, 0x2927: 0x000d, 0x2928: 0x000d, 0x2929: 0x000d, + 0x292a: 0x000d, 0x292b: 0x000d, 0x292c: 0x000d, 0x292d: 0x000d, 0x292e: 0x000d, 0x292f: 0x000d, + 0x2930: 0x0001, 0x2931: 0x0001, 0x2932: 0x0001, 0x2933: 0x0001, 0x2934: 0x0001, 0x2935: 0x0001, + 0x2936: 0x0001, 0x2937: 0x0001, 0x2938: 0x0001, 0x2939: 0x0001, 0x293a: 0x0001, 0x293b: 0x0001, + 0x293c: 0x0001, 0x293d: 0x0001, 0x293e: 0x0001, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2941: 0x000c, + 0x2978: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, 0x297b: 0x000c, + 0x297c: 0x000c, 0x297d: 0x000c, 0x297e: 0x000c, 0x297f: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, 0x2983: 0x000c, 0x2984: 0x000c, 0x2985: 0x000c, + 0x2986: 0x000c, + 0x2992: 0x000a, 0x2993: 0x000a, 0x2994: 0x000a, 0x2995: 0x000a, 0x2996: 0x000a, 0x2997: 0x000a, + 0x2998: 0x000a, 0x2999: 0x000a, 0x299a: 0x000a, 0x299b: 0x000a, 0x299c: 0x000a, 0x299d: 0x000a, + 0x299e: 0x000a, 0x299f: 0x000a, 0x29a0: 0x000a, 0x29a1: 0x000a, 0x29a2: 0x000a, 0x29a3: 0x000a, + 0x29a4: 0x000a, 0x29a5: 0x000a, + 0x29bf: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000c, 0x29c1: 0x000c, + 0x29f3: 0x000c, 0x29f4: 0x000c, 0x29f5: 0x000c, + 0x29f6: 0x000c, 0x29f9: 0x000c, 0x29fa: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000c, 0x2a01: 0x000c, 0x2a02: 0x000c, + 0x2a27: 0x000c, 0x2a28: 0x000c, 0x2a29: 0x000c, + 0x2a2a: 0x000c, 0x2a2b: 0x000c, 0x2a2d: 0x000c, 0x2a2e: 0x000c, 0x2a2f: 0x000c, + 0x2a30: 0x000c, 0x2a31: 0x000c, 0x2a32: 0x000c, 0x2a33: 0x000c, 0x2a34: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a73: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a80: 0x000c, 0x2a81: 0x000c, + 0x2ab6: 0x000c, 0x2ab7: 0x000c, 0x2ab8: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, 0x2abb: 0x000c, + 0x2abc: 0x000c, 0x2abd: 0x000c, 0x2abe: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac9: 0x000c, 0x2aca: 0x000c, 0x2acb: 0x000c, + 0x2acc: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b2f: 0x000c, + 0x2b30: 0x000c, 0x2b31: 0x000c, 0x2b34: 0x000c, + 0x2b36: 0x000c, 0x2b37: 0x000c, + 0x2b3e: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b5f: 0x000c, 0x2b63: 0x000c, + 0x2b64: 0x000c, 0x2b65: 0x000c, 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b80: 0x000c, + 0x2ba6: 0x000c, 0x2ba7: 0x000c, 0x2ba8: 0x000c, 0x2ba9: 0x000c, + 0x2baa: 0x000c, 0x2bab: 0x000c, 0x2bac: 0x000c, + 0x2bb0: 0x000c, 0x2bb1: 0x000c, 0x2bb2: 0x000c, 0x2bb3: 0x000c, 0x2bb4: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bf8: 0x000c, 0x2bf9: 0x000c, 0x2bfa: 0x000c, 0x2bfb: 0x000c, + 0x2bfc: 0x000c, 0x2bfd: 0x000c, 0x2bfe: 0x000c, 0x2bff: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c02: 0x000c, 0x2c03: 0x000c, 0x2c04: 0x000c, + 0x2c06: 0x000c, + 0x2c1e: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c73: 0x000c, 0x2c74: 0x000c, 0x2c75: 0x000c, + 0x2c76: 0x000c, 0x2c77: 0x000c, 0x2c78: 0x000c, 0x2c7a: 0x000c, + 0x2c7f: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2c80: 0x000c, 0x2c82: 0x000c, 0x2c83: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cf2: 0x000c, 0x2cf3: 0x000c, 0x2cf4: 0x000c, 0x2cf5: 0x000c, + 0x2cfc: 0x000c, 0x2cfd: 0x000c, 0x2cff: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d00: 0x000c, + 0x2d1c: 0x000c, 0x2d1d: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d73: 0x000c, 0x2d74: 0x000c, 0x2d75: 0x000c, + 0x2d76: 0x000c, 0x2d77: 0x000c, 0x2d78: 0x000c, 0x2d79: 0x000c, 0x2d7a: 0x000c, + 0x2d7d: 0x000c, 0x2d7f: 0x000c, + // Block 0xb6, offset 0x2d80 + 
0x2d80: 0x000c, + 0x2da0: 0x000a, 0x2da1: 0x000a, 0x2da2: 0x000a, 0x2da3: 0x000a, + 0x2da4: 0x000a, 0x2da5: 0x000a, 0x2da6: 0x000a, 0x2da7: 0x000a, 0x2da8: 0x000a, 0x2da9: 0x000a, + 0x2daa: 0x000a, 0x2dab: 0x000a, 0x2dac: 0x000a, + // Block 0xb7, offset 0x2dc0 + 0x2deb: 0x000c, 0x2ded: 0x000c, + 0x2df0: 0x000c, 0x2df1: 0x000c, 0x2df2: 0x000c, 0x2df3: 0x000c, 0x2df4: 0x000c, 0x2df5: 0x000c, + 0x2df7: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e1d: 0x000c, + 0x2e1e: 0x000c, 0x2e1f: 0x000c, 0x2e22: 0x000c, 0x2e23: 0x000c, + 0x2e24: 0x000c, 0x2e25: 0x000c, 0x2e27: 0x000c, 0x2e28: 0x000c, 0x2e29: 0x000c, + 0x2e2a: 0x000c, 0x2e2b: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e6f: 0x000c, + 0x2e70: 0x000c, 0x2e71: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e74: 0x000c, 0x2e75: 0x000c, + 0x2e76: 0x000c, 0x2e77: 0x000c, 0x2e79: 0x000c, 0x2e7a: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2e81: 0x000c, 0x2e82: 0x000c, 0x2e83: 0x000c, 0x2e84: 0x000c, 0x2e85: 0x000c, + 0x2e86: 0x000c, 0x2e89: 0x000c, 0x2e8a: 0x000c, + 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2ebb: 0x000c, + 0x2ebc: 0x000c, 0x2ebd: 0x000c, 0x2ebe: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ec7: 0x000c, + 0x2ed1: 0x000c, + 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c, + 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0a: 0x000c, 0x2f0b: 0x000c, + 0x2f0c: 0x000c, 0x2f0d: 0x000c, 0x2f0e: 0x000c, 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, 0x2f13: 0x000c, 0x2f14: 0x000c, 0x2f15: 0x000c, 0x2f16: 0x000c, + 0x2f18: 0x000c, 0x2f19: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f70: 0x000c, 0x2f71: 0x000c, 0x2f72: 0x000c, 0x2f73: 0x000c, 0x2f74: 0x000c, 0x2f75: 0x000c, + 0x2f76: 0x000c, 0x2f78: 0x000c, 0x2f79: 0x000c, 0x2f7a: 0x000c, 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7d: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f92: 0x000c, 0x2f93: 0x000c, 0x2f94: 0x000c, 0x2f95: 0x000c, 0x2f96: 0x000c, 0x2f97: 0x000c, + 0x2f98: 0x000c, 0x2f99: 0x000c, 0x2f9a: 0x000c, 0x2f9b: 0x000c, 0x2f9c: 0x000c, 0x2f9d: 0x000c, + 0x2f9e: 0x000c, 0x2f9f: 0x000c, 0x2fa0: 0x000c, 0x2fa1: 0x000c, 0x2fa2: 0x000c, 0x2fa3: 0x000c, + 0x2fa4: 0x000c, 0x2fa5: 0x000c, 0x2fa6: 0x000c, 0x2fa7: 0x000c, + 0x2faa: 0x000c, 0x2fab: 0x000c, 0x2fac: 0x000c, 0x2fad: 0x000c, 0x2fae: 0x000c, 0x2faf: 0x000c, + 0x2fb0: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb5: 0x000c, + 0x2fb6: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c, + 0x2ff6: 0x000c, 0x2ffa: 0x000c, + 0x2ffc: 0x000c, 0x2ffd: 0x000c, 0x2fff: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000c, 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3007: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3050: 0x000c, 0x3051: 0x000c, + 0x3055: 0x000c, 0x3057: 0x000c, + // Block 0xc2, offset 0x3080 + 0x30b3: 0x000c, 0x30b4: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30f0: 0x000c, 0x30f1: 0x000c, 0x30f2: 0x000c, 0x30f3: 0x000c, 0x30f4: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3130: 0x000c, 0x3131: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3134: 0x000c, 0x3135: 0x000c, + 0x3136: 0x000c, + // Block 0xc5, offset 0x3140 + 0x314f: 0x000c, 0x3150: 0x000c, 0x3151: 0x000c, + 0x3152: 0x000c, + // Block 0xc6, offset 0x3180 + 0x319d: 0x000c, + 0x319e: 0x000c, 0x31a0: 0x000b, 0x31a1: 0x000b, 0x31a2: 0x000b, 0x31a3: 0x000b, + // Block 0xc7, offset 0x31c0 + 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 
0x000c, + 0x31f3: 0x000b, 0x31f4: 0x000b, 0x31f5: 0x000b, + 0x31f6: 0x000b, 0x31f7: 0x000b, 0x31f8: 0x000b, 0x31f9: 0x000b, 0x31fa: 0x000b, 0x31fb: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3205: 0x000c, + 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, + 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, 0x322d: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3240: 0x000a, 0x3241: 0x000a, 0x3242: 0x000c, 0x3243: 0x000c, 0x3244: 0x000c, 0x3245: 0x000a, + // Block 0xca, offset 0x3280 + 0x3280: 0x000a, 0x3281: 0x000a, 0x3282: 0x000a, 0x3283: 0x000a, 0x3284: 0x000a, 0x3285: 0x000a, + 0x3286: 0x000a, 0x3287: 0x000a, 0x3288: 0x000a, 0x3289: 0x000a, 0x328a: 0x000a, 0x328b: 0x000a, + 0x328c: 0x000a, 0x328d: 0x000a, 0x328e: 0x000a, 0x328f: 0x000a, 0x3290: 0x000a, 0x3291: 0x000a, + 0x3292: 0x000a, 0x3293: 0x000a, 0x3294: 0x000a, 0x3295: 0x000a, 0x3296: 0x000a, + // Block 0xcb, offset 0x32c0 + 0x32db: 0x000a, + // Block 0xcc, offset 0x3300 + 0x3315: 0x000a, + // Block 0xcd, offset 0x3340 + 0x334f: 0x000a, + // Block 0xce, offset 0x3380 + 0x3389: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33c3: 0x000a, + 0x33ce: 0x0002, 0x33cf: 0x0002, 0x33d0: 0x0002, 0x33d1: 0x0002, + 0x33d2: 0x0002, 0x33d3: 0x0002, 0x33d4: 0x0002, 0x33d5: 0x0002, 0x33d6: 0x0002, 0x33d7: 0x0002, + 0x33d8: 0x0002, 0x33d9: 0x0002, 0x33da: 0x0002, 0x33db: 0x0002, 0x33dc: 0x0002, 0x33dd: 0x0002, + 0x33de: 0x0002, 0x33df: 0x0002, 0x33e0: 0x0002, 0x33e1: 0x0002, 0x33e2: 0x0002, 0x33e3: 0x0002, + 0x33e4: 0x0002, 0x33e5: 0x0002, 0x33e6: 0x0002, 0x33e7: 0x0002, 0x33e8: 0x0002, 0x33e9: 0x0002, + 0x33ea: 0x0002, 0x33eb: 0x0002, 0x33ec: 0x0002, 0x33ed: 0x0002, 0x33ee: 0x0002, 0x33ef: 0x0002, + 0x33f0: 0x0002, 0x33f1: 0x0002, 0x33f2: 0x0002, 0x33f3: 0x0002, 0x33f4: 0x0002, 0x33f5: 0x0002, + 0x33f6: 0x0002, 0x33f7: 0x0002, 0x33f8: 0x0002, 0x33f9: 0x0002, 0x33fa: 0x0002, 0x33fb: 0x0002, + 0x33fc: 0x0002, 0x33fd: 0x0002, 0x33fe: 0x0002, 0x33ff: 0x0002, + // Block 0xd0, offset 0x3400 + 0x3400: 0x000c, 0x3401: 0x000c, 0x3402: 0x000c, 0x3403: 0x000c, 0x3404: 0x000c, 0x3405: 0x000c, + 0x3406: 0x000c, 0x3407: 0x000c, 0x3408: 0x000c, 0x3409: 0x000c, 0x340a: 0x000c, 0x340b: 0x000c, + 0x340c: 0x000c, 0x340d: 0x000c, 0x340e: 0x000c, 0x340f: 0x000c, 0x3410: 0x000c, 0x3411: 0x000c, + 0x3412: 0x000c, 0x3413: 0x000c, 0x3414: 0x000c, 0x3415: 0x000c, 0x3416: 0x000c, 0x3417: 0x000c, + 0x3418: 0x000c, 0x3419: 0x000c, 0x341a: 0x000c, 0x341b: 0x000c, 0x341c: 0x000c, 0x341d: 0x000c, + 0x341e: 0x000c, 0x341f: 0x000c, 0x3420: 0x000c, 0x3421: 0x000c, 0x3422: 0x000c, 0x3423: 0x000c, + 0x3424: 0x000c, 0x3425: 0x000c, 0x3426: 0x000c, 0x3427: 0x000c, 0x3428: 0x000c, 0x3429: 0x000c, + 0x342a: 0x000c, 0x342b: 0x000c, 0x342c: 0x000c, 0x342d: 0x000c, 0x342e: 0x000c, 0x342f: 0x000c, + 0x3430: 0x000c, 0x3431: 0x000c, 0x3432: 0x000c, 0x3433: 0x000c, 0x3434: 0x000c, 0x3435: 0x000c, + 0x3436: 0x000c, 0x343b: 0x000c, + 0x343c: 0x000c, 0x343d: 0x000c, 0x343e: 0x000c, 0x343f: 0x000c, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000c, 0x3441: 0x000c, 0x3442: 0x000c, 0x3443: 0x000c, 0x3444: 0x000c, 0x3445: 0x000c, + 0x3446: 0x000c, 0x3447: 0x000c, 0x3448: 0x000c, 0x3449: 0x000c, 0x344a: 0x000c, 0x344b: 0x000c, + 0x344c: 0x000c, 0x344d: 0x000c, 0x344e: 0x000c, 0x344f: 0x000c, 0x3450: 0x000c, 0x3451: 0x000c, + 0x3452: 0x000c, 0x3453: 0x000c, 0x3454: 0x000c, 0x3455: 0x000c, 0x3456: 0x000c, 0x3457: 0x000c, + 0x3458: 0x000c, 0x3459: 0x000c, 
0x345a: 0x000c, 0x345b: 0x000c, 0x345c: 0x000c, 0x345d: 0x000c, + 0x345e: 0x000c, 0x345f: 0x000c, 0x3460: 0x000c, 0x3461: 0x000c, 0x3462: 0x000c, 0x3463: 0x000c, + 0x3464: 0x000c, 0x3465: 0x000c, 0x3466: 0x000c, 0x3467: 0x000c, 0x3468: 0x000c, 0x3469: 0x000c, + 0x346a: 0x000c, 0x346b: 0x000c, 0x346c: 0x000c, + 0x3475: 0x000c, + // Block 0xd2, offset 0x3480 + 0x3484: 0x000c, + 0x349b: 0x000c, 0x349c: 0x000c, 0x349d: 0x000c, + 0x349e: 0x000c, 0x349f: 0x000c, 0x34a1: 0x000c, 0x34a2: 0x000c, 0x34a3: 0x000c, + 0x34a4: 0x000c, 0x34a5: 0x000c, 0x34a6: 0x000c, 0x34a7: 0x000c, 0x34a8: 0x000c, 0x34a9: 0x000c, + 0x34aa: 0x000c, 0x34ab: 0x000c, 0x34ac: 0x000c, 0x34ad: 0x000c, 0x34ae: 0x000c, 0x34af: 0x000c, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000c, 0x34c1: 0x000c, 0x34c2: 0x000c, 0x34c3: 0x000c, 0x34c4: 0x000c, 0x34c5: 0x000c, + 0x34c6: 0x000c, 0x34c8: 0x000c, 0x34c9: 0x000c, 0x34ca: 0x000c, 0x34cb: 0x000c, + 0x34cc: 0x000c, 0x34cd: 0x000c, 0x34ce: 0x000c, 0x34cf: 0x000c, 0x34d0: 0x000c, 0x34d1: 0x000c, + 0x34d2: 0x000c, 0x34d3: 0x000c, 0x34d4: 0x000c, 0x34d5: 0x000c, 0x34d6: 0x000c, 0x34d7: 0x000c, + 0x34d8: 0x000c, 0x34db: 0x000c, 0x34dc: 0x000c, 0x34dd: 0x000c, + 0x34de: 0x000c, 0x34df: 0x000c, 0x34e0: 0x000c, 0x34e1: 0x000c, 0x34e3: 0x000c, + 0x34e4: 0x000c, 0x34e6: 0x000c, 0x34e7: 0x000c, 0x34e8: 0x000c, 0x34e9: 0x000c, + 0x34ea: 0x000c, + // Block 0xd4, offset 0x3500 + 0x3500: 0x0001, 0x3501: 0x0001, 0x3502: 0x0001, 0x3503: 0x0001, 0x3504: 0x0001, 0x3505: 0x0001, + 0x3506: 0x0001, 0x3507: 0x0001, 0x3508: 0x0001, 0x3509: 0x0001, 0x350a: 0x0001, 0x350b: 0x0001, + 0x350c: 0x0001, 0x350d: 0x0001, 0x350e: 0x0001, 0x350f: 0x0001, 0x3510: 0x000c, 0x3511: 0x000c, + 0x3512: 0x000c, 0x3513: 0x000c, 0x3514: 0x000c, 0x3515: 0x000c, 0x3516: 0x000c, 0x3517: 0x0001, + 0x3518: 0x0001, 0x3519: 0x0001, 0x351a: 0x0001, 0x351b: 0x0001, 0x351c: 0x0001, 0x351d: 0x0001, + 0x351e: 0x0001, 0x351f: 0x0001, 0x3520: 0x0001, 0x3521: 0x0001, 0x3522: 0x0001, 0x3523: 0x0001, + 0x3524: 0x0001, 0x3525: 0x0001, 0x3526: 0x0001, 0x3527: 0x0001, 0x3528: 0x0001, 0x3529: 0x0001, + 0x352a: 0x0001, 0x352b: 0x0001, 0x352c: 0x0001, 0x352d: 0x0001, 0x352e: 0x0001, 0x352f: 0x0001, + 0x3530: 0x0001, 0x3531: 0x0001, 0x3532: 0x0001, 0x3533: 0x0001, 0x3534: 0x0001, 0x3535: 0x0001, + 0x3536: 0x0001, 0x3537: 0x0001, 0x3538: 0x0001, 0x3539: 0x0001, 0x353a: 0x0001, 0x353b: 0x0001, + 0x353c: 0x0001, 0x353d: 0x0001, 0x353e: 0x0001, 0x353f: 0x0001, + // Block 0xd5, offset 0x3540 + 0x3540: 0x0001, 0x3541: 0x0001, 0x3542: 0x0001, 0x3543: 0x0001, 0x3544: 0x000c, 0x3545: 0x000c, + 0x3546: 0x000c, 0x3547: 0x000c, 0x3548: 0x000c, 0x3549: 0x000c, 0x354a: 0x000c, 0x354b: 0x0001, + 0x354c: 0x0001, 0x354d: 0x0001, 0x354e: 0x0001, 0x354f: 0x0001, 0x3550: 0x0001, 0x3551: 0x0001, + 0x3552: 0x0001, 0x3553: 0x0001, 0x3554: 0x0001, 0x3555: 0x0001, 0x3556: 0x0001, 0x3557: 0x0001, + 0x3558: 0x0001, 0x3559: 0x0001, 0x355a: 0x0001, 0x355b: 0x0001, 0x355c: 0x0001, 0x355d: 0x0001, + 0x355e: 0x0001, 0x355f: 0x0001, 0x3560: 0x0001, 0x3561: 0x0001, 0x3562: 0x0001, 0x3563: 0x0001, + 0x3564: 0x0001, 0x3565: 0x0001, 0x3566: 0x0001, 0x3567: 0x0001, 0x3568: 0x0001, 0x3569: 0x0001, + 0x356a: 0x0001, 0x356b: 0x0001, 0x356c: 0x0001, 0x356d: 0x0001, 0x356e: 0x0001, 0x356f: 0x0001, + 0x3570: 0x0001, 0x3571: 0x0001, 0x3572: 0x0001, 0x3573: 0x0001, 0x3574: 0x0001, 0x3575: 0x0001, + 0x3576: 0x0001, 0x3577: 0x0001, 0x3578: 0x0001, 0x3579: 0x0001, 0x357a: 0x0001, 0x357b: 0x0001, + 0x357c: 0x0001, 0x357d: 0x0001, 0x357e: 0x0001, 0x357f: 0x0001, + // Block 0xd6, offset 0x3580 + 0x3580: 
0x000d, 0x3581: 0x000d, 0x3582: 0x000d, 0x3583: 0x000d, 0x3584: 0x000d, 0x3585: 0x000d, + 0x3586: 0x000d, 0x3587: 0x000d, 0x3588: 0x000d, 0x3589: 0x000d, 0x358a: 0x000d, 0x358b: 0x000d, + 0x358c: 0x000d, 0x358d: 0x000d, 0x358e: 0x000d, 0x358f: 0x000d, 0x3590: 0x000d, 0x3591: 0x000d, + 0x3592: 0x000d, 0x3593: 0x000d, 0x3594: 0x000d, 0x3595: 0x000d, 0x3596: 0x000d, 0x3597: 0x000d, + 0x3598: 0x000d, 0x3599: 0x000d, 0x359a: 0x000d, 0x359b: 0x000d, 0x359c: 0x000d, 0x359d: 0x000d, + 0x359e: 0x000d, 0x359f: 0x000d, 0x35a0: 0x000d, 0x35a1: 0x000d, 0x35a2: 0x000d, 0x35a3: 0x000d, + 0x35a4: 0x000d, 0x35a5: 0x000d, 0x35a6: 0x000d, 0x35a7: 0x000d, 0x35a8: 0x000d, 0x35a9: 0x000d, + 0x35aa: 0x000d, 0x35ab: 0x000d, 0x35ac: 0x000d, 0x35ad: 0x000d, 0x35ae: 0x000d, 0x35af: 0x000d, + 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000d, 0x35b3: 0x000d, 0x35b4: 0x000d, 0x35b5: 0x000d, + 0x35b6: 0x000d, 0x35b7: 0x000d, 0x35b8: 0x000d, 0x35b9: 0x000d, 0x35ba: 0x000d, 0x35bb: 0x000d, + 0x35bc: 0x000d, 0x35bd: 0x000d, 0x35be: 0x000d, 0x35bf: 0x000d, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, + 0x35c6: 0x000a, 0x35c7: 0x000a, 0x35c8: 0x000a, 0x35c9: 0x000a, 0x35ca: 0x000a, 0x35cb: 0x000a, + 0x35cc: 0x000a, 0x35cd: 0x000a, 0x35ce: 0x000a, 0x35cf: 0x000a, 0x35d0: 0x000a, 0x35d1: 0x000a, + 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, + 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, + 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, + 0x35ea: 0x000a, 0x35eb: 0x000a, + 0x35f0: 0x000a, 0x35f1: 0x000a, 0x35f2: 0x000a, 0x35f3: 0x000a, 0x35f4: 0x000a, 0x35f5: 0x000a, + 0x35f6: 0x000a, 0x35f7: 0x000a, 0x35f8: 0x000a, 0x35f9: 0x000a, 0x35fa: 0x000a, 0x35fb: 0x000a, + 0x35fc: 0x000a, 0x35fd: 0x000a, 0x35fe: 0x000a, 0x35ff: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3600: 0x000a, 0x3601: 0x000a, 0x3602: 0x000a, 0x3603: 0x000a, 0x3604: 0x000a, 0x3605: 0x000a, + 0x3606: 0x000a, 0x3607: 0x000a, 0x3608: 0x000a, 0x3609: 0x000a, 0x360a: 0x000a, 0x360b: 0x000a, + 0x360c: 0x000a, 0x360d: 0x000a, 0x360e: 0x000a, 0x360f: 0x000a, 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, + 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3628: 0x000a, 0x3629: 0x000a, + 0x362a: 0x000a, 0x362b: 0x000a, 0x362c: 0x000a, 0x362d: 0x000a, 0x362e: 0x000a, + 0x3631: 0x000a, 0x3632: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, + 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, 0x363f: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x364c: 0x000a, 0x364d: 0x000a, 0x364e: 0x000a, 0x364f: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, + 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, + 0x365e: 0x000a, 0x365f: 0x000a, 0x3660: 0x000a, 0x3661: 0x000a, 0x3662: 0x000a, 0x3663: 0x000a, + 0x3664: 0x000a, 0x3665: 0x000a, 0x3666: 0x000a, 0x3667: 0x000a, 
0x3668: 0x000a, 0x3669: 0x000a, + 0x366a: 0x000a, 0x366b: 0x000a, 0x366c: 0x000a, 0x366d: 0x000a, 0x366e: 0x000a, 0x366f: 0x000a, + 0x3670: 0x000a, 0x3671: 0x000a, 0x3672: 0x000a, 0x3673: 0x000a, 0x3674: 0x000a, 0x3675: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x0002, 0x3681: 0x0002, 0x3682: 0x0002, 0x3683: 0x0002, 0x3684: 0x0002, 0x3685: 0x0002, + 0x3686: 0x0002, 0x3687: 0x0002, 0x3688: 0x0002, 0x3689: 0x0002, 0x368a: 0x0002, 0x368b: 0x000a, + 0x368c: 0x000a, + 0x36af: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36ea: 0x000a, 0x36eb: 0x000a, + // Block 0xdc, offset 0x3700 + 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a, + 0x3724: 0x000a, 0x3725: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a, + 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a, + 0x374c: 0x000a, 0x374d: 0x000a, 0x374e: 0x000a, 0x374f: 0x000a, 0x3750: 0x000a, 0x3751: 0x000a, + 0x3752: 0x000a, 0x3753: 0x000a, 0x3754: 0x000a, + 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a, + 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a, + 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, + 0x3770: 0x000a, 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a, + 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, + // Block 0xde, offset 0x3780 + 0x3780: 0x000a, 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a, + 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a, + 0x378c: 0x000a, 0x378d: 0x000a, 0x378e: 0x000a, 0x378f: 0x000a, 0x3790: 0x000a, 0x3791: 0x000a, + 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a, + 0x3798: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x000a, 0x37c1: 0x000a, 0x37c2: 0x000a, 0x37c3: 0x000a, 0x37c4: 0x000a, 0x37c5: 0x000a, + 0x37c6: 0x000a, 0x37c7: 0x000a, 0x37c8: 0x000a, 0x37c9: 0x000a, 0x37ca: 0x000a, 0x37cb: 0x000a, + 0x37d0: 0x000a, 0x37d1: 0x000a, + 0x37d2: 0x000a, 0x37d3: 0x000a, 0x37d4: 0x000a, 0x37d5: 0x000a, 0x37d6: 0x000a, 0x37d7: 0x000a, + 0x37d8: 0x000a, 0x37d9: 0x000a, 0x37da: 0x000a, 0x37db: 0x000a, 0x37dc: 0x000a, 0x37dd: 0x000a, + 0x37de: 0x000a, 0x37df: 0x000a, 0x37e0: 0x000a, 0x37e1: 0x000a, 0x37e2: 0x000a, 0x37e3: 0x000a, + 0x37e4: 0x000a, 0x37e5: 0x000a, 0x37e6: 0x000a, 0x37e7: 0x000a, 0x37e8: 0x000a, 0x37e9: 0x000a, + 0x37ea: 0x000a, 0x37eb: 0x000a, 0x37ec: 0x000a, 0x37ed: 0x000a, 0x37ee: 0x000a, 0x37ef: 0x000a, + 0x37f0: 0x000a, 0x37f1: 0x000a, 0x37f2: 0x000a, 0x37f3: 0x000a, 0x37f4: 0x000a, 0x37f5: 0x000a, + 0x37f6: 0x000a, 0x37f7: 0x000a, 0x37f8: 0x000a, 0x37f9: 0x000a, 0x37fa: 0x000a, 0x37fb: 0x000a, + 0x37fc: 0x000a, 0x37fd: 0x000a, 0x37fe: 0x000a, 0x37ff: 0x000a, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000a, 0x3801: 0x000a, 0x3802: 0x000a, 0x3803: 0x000a, 0x3804: 0x000a, 0x3805: 0x000a, + 0x3806: 0x000a, 0x3807: 0x000a, + 0x3810: 0x000a, 0x3811: 0x000a, + 0x3812: 0x000a, 0x3813: 0x000a, 0x3814: 0x000a, 0x3815: 0x000a, 0x3816: 0x000a, 0x3817: 0x000a, + 0x3818: 0x000a, 0x3819: 0x000a, + 0x3820: 0x000a, 0x3821: 0x000a, 0x3822: 0x000a, 0x3823: 0x000a, + 0x3824: 0x000a, 0x3825: 0x000a, 0x3826: 0x000a, 0x3827: 0x000a, 0x3828: 0x000a, 0x3829: 0x000a, + 0x382a: 0x000a, 0x382b: 0x000a, 0x382c: 0x000a, 0x382d: 0x000a, 0x382e: 0x000a, 0x382f: 0x000a, + 0x3830: 0x000a, 0x3831: 0x000a, 0x3832: 0x000a, 0x3833: 0x000a, 
0x3834: 0x000a, 0x3835: 0x000a, + 0x3836: 0x000a, 0x3837: 0x000a, 0x3838: 0x000a, 0x3839: 0x000a, 0x383a: 0x000a, 0x383b: 0x000a, + 0x383c: 0x000a, 0x383d: 0x000a, 0x383e: 0x000a, 0x383f: 0x000a, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000a, 0x3841: 0x000a, 0x3842: 0x000a, 0x3843: 0x000a, 0x3844: 0x000a, 0x3845: 0x000a, + 0x3846: 0x000a, 0x3847: 0x000a, + 0x3850: 0x000a, 0x3851: 0x000a, + 0x3852: 0x000a, 0x3853: 0x000a, 0x3854: 0x000a, 0x3855: 0x000a, 0x3856: 0x000a, 0x3857: 0x000a, + 0x3858: 0x000a, 0x3859: 0x000a, 0x385a: 0x000a, 0x385b: 0x000a, 0x385c: 0x000a, 0x385d: 0x000a, + 0x385e: 0x000a, 0x385f: 0x000a, 0x3860: 0x000a, 0x3861: 0x000a, 0x3862: 0x000a, 0x3863: 0x000a, + 0x3864: 0x000a, 0x3865: 0x000a, 0x3866: 0x000a, 0x3867: 0x000a, 0x3868: 0x000a, 0x3869: 0x000a, + 0x386a: 0x000a, 0x386b: 0x000a, 0x386c: 0x000a, 0x386d: 0x000a, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, 0x3896: 0x000a, 0x3897: 0x000a, + 0x3898: 0x000a, 0x3899: 0x000a, 0x389a: 0x000a, 0x389b: 0x000a, 0x389c: 0x000a, 0x389d: 0x000a, + 0x389e: 0x000a, 0x389f: 0x000a, 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, 0x38ac: 0x000a, 0x38ad: 0x000a, 0x38ae: 0x000a, 0x38af: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, 0x38bb: 0x000a, + 0x38bc: 0x000a, 0x38bd: 0x000a, 0x38be: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, 0x38d4: 0x000a, 0x38d5: 0x000a, 0x38d6: 0x000a, 0x38d7: 0x000a, + 0x38d8: 0x000a, 0x38d9: 0x000a, 0x38da: 0x000a, 0x38db: 0x000a, 0x38dc: 0x000a, 0x38dd: 0x000a, + 0x38de: 0x000a, 0x38df: 0x000a, 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, 0x38ec: 0x000a, 0x38ed: 0x000a, 0x38ee: 0x000a, 0x38ef: 0x000a, + 0x38f0: 0x000a, 0x38f3: 0x000a, 0x38f4: 0x000a, 0x38f5: 0x000a, + 0x38f6: 0x000a, 0x38fa: 0x000a, + 0x38fc: 0x000a, 0x38fd: 0x000a, 0x38fe: 0x000a, 0x38ff: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3900: 0x000a, 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x390c: 0x000a, 0x390d: 0x000a, 0x390e: 0x000a, 0x390f: 0x000a, 0x3910: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + 0x3936: 0x000a, 
0x3937: 0x000a, 0x3938: 0x000a, 0x3939: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x000a, 0x3941: 0x000a, 0x3942: 0x000a, + 0x3950: 0x000a, 0x3951: 0x000a, + 0x3952: 0x000a, 0x3953: 0x000a, 0x3954: 0x000a, 0x3955: 0x000a, 0x3956: 0x000a, 0x3957: 0x000a, + 0x3958: 0x000a, 0x3959: 0x000a, 0x395a: 0x000a, 0x395b: 0x000a, 0x395c: 0x000a, 0x395d: 0x000a, + 0x395e: 0x000a, 0x395f: 0x000a, 0x3960: 0x000a, 0x3961: 0x000a, 0x3962: 0x000a, 0x3963: 0x000a, + 0x3964: 0x000a, 0x3965: 0x000a, 0x3966: 0x000a, 0x3967: 0x000a, 0x3968: 0x000a, 0x3969: 0x000a, + 0x396a: 0x000a, 0x396b: 0x000a, 0x396c: 0x000a, 0x396d: 0x000a, 0x396e: 0x000a, 0x396f: 0x000a, + 0x3970: 0x000a, 0x3971: 0x000a, 0x3972: 0x000a, 0x3973: 0x000a, 0x3974: 0x000a, 0x3975: 0x000a, + 0x3976: 0x000a, 0x3977: 0x000a, 0x3978: 0x000a, 0x3979: 0x000a, 0x397a: 0x000a, 0x397b: 0x000a, + 0x397c: 0x000a, 0x397d: 0x000a, 0x397e: 0x000a, 0x397f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x39a0: 0x000a, 0x39a1: 0x000a, 0x39a2: 0x000a, 0x39a3: 0x000a, + 0x39a4: 0x000a, 0x39a5: 0x000a, 0x39a6: 0x000a, 0x39a7: 0x000a, 0x39a8: 0x000a, 0x39a9: 0x000a, + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39fe: 0x000b, 0x39ff: 0x000b, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x000b, 0x3a01: 0x000b, 0x3a02: 0x000b, 0x3a03: 0x000b, 0x3a04: 0x000b, 0x3a05: 0x000b, + 0x3a06: 0x000b, 0x3a07: 0x000b, 0x3a08: 0x000b, 0x3a09: 0x000b, 0x3a0a: 0x000b, 0x3a0b: 0x000b, + 0x3a0c: 0x000b, 0x3a0d: 0x000b, 0x3a0e: 0x000b, 0x3a0f: 0x000b, 0x3a10: 0x000b, 0x3a11: 0x000b, + 0x3a12: 0x000b, 0x3a13: 0x000b, 0x3a14: 0x000b, 0x3a15: 0x000b, 0x3a16: 0x000b, 0x3a17: 0x000b, + 0x3a18: 0x000b, 0x3a19: 0x000b, 0x3a1a: 0x000b, 0x3a1b: 0x000b, 0x3a1c: 0x000b, 0x3a1d: 0x000b, + 0x3a1e: 0x000b, 0x3a1f: 0x000b, 0x3a20: 0x000b, 0x3a21: 0x000b, 0x3a22: 0x000b, 0x3a23: 0x000b, + 0x3a24: 0x000b, 0x3a25: 0x000b, 0x3a26: 0x000b, 0x3a27: 0x000b, 0x3a28: 0x000b, 0x3a29: 0x000b, + 0x3a2a: 0x000b, 0x3a2b: 0x000b, 0x3a2c: 0x000b, 0x3a2d: 0x000b, 0x3a2e: 0x000b, 0x3a2f: 0x000b, + 0x3a30: 0x000b, 0x3a31: 0x000b, 0x3a32: 0x000b, 0x3a33: 0x000b, 0x3a34: 0x000b, 0x3a35: 0x000b, + 0x3a36: 0x000b, 0x3a37: 0x000b, 0x3a38: 0x000b, 0x3a39: 0x000b, 0x3a3a: 0x000b, 0x3a3b: 0x000b, + 0x3a3c: 0x000b, 0x3a3d: 0x000b, 0x3a3e: 0x000b, 0x3a3f: 0x000b, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000c, 0x3a41: 0x000c, 0x3a42: 0x000c, 0x3a43: 0x000c, 0x3a44: 0x000c, 0x3a45: 0x000c, + 0x3a46: 0x000c, 0x3a47: 0x000c, 0x3a48: 0x000c, 0x3a49: 0x000c, 0x3a4a: 0x000c, 0x3a4b: 0x000c, + 0x3a4c: 0x000c, 0x3a4d: 0x000c, 0x3a4e: 0x000c, 0x3a4f: 0x000c, 0x3a50: 0x000c, 0x3a51: 0x000c, + 0x3a52: 0x000c, 0x3a53: 0x000c, 0x3a54: 0x000c, 0x3a55: 0x000c, 0x3a56: 0x000c, 0x3a57: 0x000c, + 0x3a58: 0x000c, 0x3a59: 0x000c, 0x3a5a: 0x000c, 0x3a5b: 0x000c, 0x3a5c: 0x000c, 0x3a5d: 0x000c, + 0x3a5e: 0x000c, 0x3a5f: 0x000c, 0x3a60: 0x000c, 0x3a61: 0x000c, 0x3a62: 0x000c, 0x3a63: 0x000c, + 0x3a64: 0x000c, 0x3a65: 0x000c, 0x3a66: 0x000c, 0x3a67: 0x000c, 0x3a68: 0x000c, 0x3a69: 0x000c, + 0x3a6a: 0x000c, 0x3a6b: 0x000c, 0x3a6c: 0x000c, 0x3a6d: 0x000c, 0x3a6e: 0x000c, 0x3a6f: 0x000c, + 0x3a70: 0x000b, 0x3a71: 0x000b, 0x3a72: 0x000b, 0x3a73: 0x000b, 0x3a74: 0x000b, 0x3a75: 0x000b, + 0x3a76: 0x000b, 0x3a77: 0x000b, 0x3a78: 0x000b, 0x3a79: 0x000b, 0x3a7a: 0x000b, 0x3a7b: 0x000b, + 0x3a7c: 0x000b, 0x3a7d: 0x000b, 0x3a7e: 0x000b, 0x3a7f: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54, + 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x64, + 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67, + 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70, + 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x77, 0x253: 0x78, + 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e, + 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85, + 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26f: 0x8a, + // Block 0xa, offset 0x280 + 0x2ac: 0x8b, 0x2ad: 0x8c, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8d, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8e, + 0x2b8: 0x8f, 0x2b9: 0x90, 0x2ba: 0x0e, 0x2bb: 0x91, 0x2bc: 0x92, 0x2bd: 0x93, 0x2bf: 0x94, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x95, 0x2c5: 0x54, 0x2c6: 0x96, 0x2c7: 0x97, + 0x2cb: 0x98, 0x2cd: 0x99, + 0x2e0: 0x9a, 0x2e1: 0x9a, 0x2e2: 0x9a, 0x2e3: 0x9a, 0x2e4: 0x9b, 0x2e5: 0x9a, 0x2e6: 0x9a, 0x2e7: 0x9a, + 0x2e8: 0x9c, 0x2e9: 0x9a, 0x2ea: 0x9a, 0x2eb: 0x9d, 0x2ec: 0x9e, 0x2ed: 0x9a, 0x2ee: 0x9a, 0x2ef: 0x9a, + 0x2f0: 0x9a, 0x2f1: 0x9a, 0x2f2: 0x9a, 0x2f3: 0x9a, 0x2f4: 0x9f, 0x2f5: 0x9a, 0x2f6: 0x9a, 0x2f7: 0x9a, + 0x2f8: 0x9a, 0x2f9: 0xa0, 0x2fa: 0x9a, 0x2fb: 0x9a, 0x2fc: 0xa1, 0x2fd: 0xa2, 0x2fe: 0x9a, 0x2ff: 0x9a, + // Block 0xc, offset 0x300 + 0x300: 0xa3, 0x301: 0xa4, 0x302: 0xa5, 0x304: 0xa6, 0x305: 0xa7, 
0x306: 0xa8, 0x307: 0xa9, + 0x308: 0xaa, 0x30b: 0xab, 0x30c: 0x26, 0x30d: 0xac, + 0x310: 0xad, 0x311: 0xae, 0x312: 0xaf, 0x313: 0xb0, 0x316: 0xb1, 0x317: 0xb2, + 0x318: 0xb3, 0x319: 0xb4, 0x31a: 0xb5, 0x31c: 0xb6, + 0x320: 0xb7, + 0x328: 0xb8, 0x329: 0xb9, 0x32a: 0xba, + 0x330: 0xbb, 0x332: 0xbc, 0x334: 0xbd, 0x335: 0xbe, 0x336: 0xbf, + 0x33b: 0xc0, + // Block 0xd, offset 0x340 + 0x36b: 0xc1, 0x36c: 0xc2, + 0x37e: 0xc3, + // Block 0xe, offset 0x380 + 0x3b2: 0xc4, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xc5, 0x3c6: 0xc6, + 0x3c8: 0x54, 0x3c9: 0xc7, 0x3cc: 0x54, 0x3cd: 0xc8, + 0x3db: 0xc9, 0x3dc: 0xca, 0x3dd: 0xcb, 0x3de: 0xcc, 0x3df: 0xcd, + 0x3e8: 0xce, 0x3e9: 0xcf, 0x3ea: 0xd0, + // Block 0x10, offset 0x400 + 0x400: 0xd1, + 0x420: 0x9a, 0x421: 0x9a, 0x422: 0x9a, 0x423: 0xd2, 0x424: 0x9a, 0x425: 0xd3, 0x426: 0x9a, 0x427: 0x9a, + 0x428: 0x9a, 0x429: 0x9a, 0x42a: 0x9a, 0x42b: 0x9a, 0x42c: 0x9a, 0x42d: 0x9a, 0x42e: 0x9a, 0x42f: 0x9a, + 0x430: 0x9a, 0x431: 0xa1, 0x432: 0x0e, 0x433: 0x9a, 0x434: 0x9a, 0x435: 0x9a, 0x436: 0x9a, 0x437: 0x9a, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xd4, 0x43c: 0x9a, 0x43d: 0x9a, 0x43e: 0x9a, 0x43f: 0x9a, + // Block 0x11, offset 0x440 + 0x440: 0xd5, 0x441: 0x54, 0x442: 0xd6, 0x443: 0xd7, 0x444: 0xd8, 0x445: 0xd9, + 0x449: 0xda, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xdb, 0x45c: 0x54, 0x45d: 0x6b, 0x45e: 0x54, 0x45f: 0xdc, + 0x460: 0xdd, 0x461: 0xde, 0x462: 0xdf, 0x464: 0xe0, 0x465: 0xe1, 0x466: 0xe2, 0x467: 0xe3, + 0x469: 0xe4, + 0x47f: 0xe5, + // Block 0x12, offset 0x480 + 0x4bf: 0xe5, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xe6, 0x541: 0xe6, 0x542: 0xe6, 0x543: 0xe6, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xe7, + 0x548: 0xe6, 0x549: 0xe6, 0x54a: 0xe6, 0x54b: 0xe6, 0x54c: 0xe6, 0x54d: 0xe6, 0x54e: 0xe6, 0x54f: 0xe6, + 0x550: 0xe6, 0x551: 0xe6, 0x552: 0xe6, 0x553: 0xe6, 0x554: 0xe6, 0x555: 0xe6, 0x556: 0xe6, 0x557: 0xe6, + 0x558: 0xe6, 0x559: 0xe6, 0x55a: 0xe6, 0x55b: 0xe6, 0x55c: 0xe6, 0x55d: 0xe6, 0x55e: 0xe6, 0x55f: 0xe6, + 0x560: 0xe6, 0x561: 0xe6, 0x562: 0xe6, 0x563: 0xe6, 0x564: 0xe6, 0x565: 0xe6, 0x566: 0xe6, 0x567: 0xe6, + 0x568: 0xe6, 0x569: 0xe6, 0x56a: 0xe6, 0x56b: 0xe6, 0x56c: 0xe6, 0x56d: 0xe6, 0x56e: 0xe6, 0x56f: 0xe6, + 0x570: 0xe6, 0x571: 0xe6, 0x572: 0xe6, 0x573: 0xe6, 0x574: 0xe6, 0x575: 0xe6, 0x576: 0xe6, 0x577: 0xe6, + 0x578: 0xe6, 0x579: 0xe6, 0x57a: 0xe6, 0x57b: 0xe6, 0x57c: 0xe6, 0x57d: 0xe6, 0x57e: 0xe6, 0x57f: 0xe6, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 16568 bytes (16KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/norm/composition.go b/vendor/golang.org/x/text/unicode/norm/composition.go index bab4c5de..e2087bce 100644 --- a/vendor/golang.org/x/text/unicode/norm/composition.go +++ b/vendor/golang.org/x/text/unicode/norm/composition.go @@ -407,7 +407,7 @@ func decomposeHangul(buf []byte, r rune) int { // decomposeHangul algorithmically decomposes a Hangul rune into // its Jamo components. 
-// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul. +// See https://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul. func (rb *reorderBuffer) decomposeHangul(r rune) { r -= hangulBase x := r % jamoTCount @@ -420,7 +420,7 @@ func (rb *reorderBuffer) decomposeHangul(r rune) { } // combineHangul algorithmically combines Jamo character components into Hangul. -// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul. +// See https://unicode.org/reports/tr15/#Hangul for details on combining Hangul. func (rb *reorderBuffer) combineHangul(s, i, k int) { b := rb.rune[:] bn := rb.nrune @@ -461,6 +461,10 @@ func (rb *reorderBuffer) combineHangul(s, i, k int) { // It should only be used to recompose a single segment, as it will not // handle alternations between Hangul and non-Hangul characters correctly. func (rb *reorderBuffer) compose() { + // Lazily load the map used by the combine func below, but do + // it outside of the loop. + recompMapOnce.Do(buildRecompMap) + // UAX #15, section X5 , including Corrigendum #5 // "In any character sequence beginning with starter S, a character C is // blocked from S if and only if there is some character B between S diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index e67e7655..526c7033 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -4,6 +4,8 @@ package norm +import "encoding/binary" + // This file contains Form-specific logic and wrappers for data in tables.go. // Rune info is stored in a separate trie per composing form. A composing form @@ -178,6 +180,17 @@ func (p Properties) TrailCCC() uint8 { return ccc[p.tccc] } +func buildRecompMap() { + recompMap = make(map[uint32]rune, len(recompMapPacked)/8) + var buf [8]byte + for i := 0; i < len(recompMapPacked); i += 8 { + copy(buf[:], recompMapPacked[i:i+8]) + key := binary.BigEndian.Uint32(buf[:4]) + val := binary.BigEndian.Uint32(buf[4:]) + recompMap[key] = rune(val) + } +} + // Recomposition // We use 32-bit keys instead of 64-bit for the two codepoint keys. // This clips off the bits of three entries, but we know this will not @@ -186,8 +199,14 @@ func (p Properties) TrailCCC() uint8 { // Note that the recomposition map for NFC and NFKC are identical. // combine returns the combined rune or 0 if it doesn't exist. +// +// The caller is responsible for calling +// recompMapOnce.Do(buildRecompMap) sometime before this is called. func combine(a, b rune) rune { key := uint32(uint16(a))<<16 + uint32(uint16(b)) + if recompMap == nil { + panic("caller error") // see func comment + } return recompMap[key] } diff --git a/vendor/golang.org/x/text/unicode/norm/iter.go b/vendor/golang.org/x/text/unicode/norm/iter.go index ce17f96c..417c6b26 100644 --- a/vendor/golang.org/x/text/unicode/norm/iter.go +++ b/vendor/golang.org/x/text/unicode/norm/iter.go @@ -128,8 +128,9 @@ func (i *Iter) Next() []byte { func nextASCIIBytes(i *Iter) []byte { p := i.p + 1 if p >= i.rb.nsrc { + p0 := i.p i.setDone() - return i.rb.src.bytes[i.p:p] + return i.rb.src.bytes[p0:p] } if i.rb.src.bytes[p] < utf8.RuneSelf { p0 := i.p diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 338c395e..00000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,976 +0,0 @@ -// Copyright 2011 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. - -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). -type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. - ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. 
-type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. - decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. -func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... 
-// See http://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. -const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. -func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. - for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. 
- for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. - trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap = map[uint32]rune{") - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i) - } - } - fmt.Fprintf(w, "}\n\n") - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. -func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. - // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... 
-// See http://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. - for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go index e28ac641..95efcf26 100644 --- a/vendor/golang.org/x/text/unicode/norm/normalize.go +++ b/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -29,8 +29,8 @@ import ( // proceed independently on both sides: // f(x) == append(f(x[0:n]), f(x[n:])...) // -// References: http://unicode.org/reports/tr15/ and -// http://unicode.org/notes/tn5/. +// References: https://unicode.org/reports/tr15/ and +// https://unicode.org/notes/tn5/. 
type Form int const ( diff --git a/vendor/golang.org/x/text/unicode/norm/readwriter.go b/vendor/golang.org/x/text/unicode/norm/readwriter.go index d926ee90..b38096f5 100644 --- a/vendor/golang.org/x/text/unicode/norm/readwriter.go +++ b/vendor/golang.org/x/text/unicode/norm/readwriter.go @@ -60,8 +60,8 @@ func (w *normWriter) Close() error { } // Writer returns a new writer that implements Write(b) -// by writing f(b) to w. The returned writer may use an -// an internal buffer to maintain state across Write calls. +// by writing f(b) to w. The returned writer may use an +// internal buffer to maintain state across Write calls. // Calling its Close method writes any buffered data to w. func (f Form) Writer(w io.Writer) io.WriteCloser { wr := &normWriter{rb: reorderBuffer{}, w: w} diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index 44dd3978..26fbd55a 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,9 +1,11 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.10 +// +build go1.10,!go1.13 package norm +import "sync" + const ( // Version is the Unicode edition from which the tables are derived. Version = "10.0.0" @@ -6707,947 +6709,949 @@ var nfkcSparseValues = [869]valueRange{ } // recompMap: 7520 bytes (entries only) -var recompMap = map[uint32]rune{ - 0x00410300: 0x00C0, - 0x00410301: 0x00C1, - 0x00410302: 0x00C2, - 0x00410303: 0x00C3, - 0x00410308: 0x00C4, - 0x0041030A: 0x00C5, - 0x00430327: 0x00C7, - 0x00450300: 0x00C8, - 0x00450301: 0x00C9, - 0x00450302: 0x00CA, - 0x00450308: 0x00CB, - 0x00490300: 0x00CC, - 0x00490301: 0x00CD, - 0x00490302: 0x00CE, - 0x00490308: 0x00CF, - 0x004E0303: 0x00D1, - 0x004F0300: 0x00D2, - 0x004F0301: 0x00D3, - 0x004F0302: 0x00D4, - 0x004F0303: 0x00D5, - 0x004F0308: 0x00D6, - 0x00550300: 0x00D9, - 0x00550301: 0x00DA, - 0x00550302: 0x00DB, - 0x00550308: 0x00DC, - 0x00590301: 0x00DD, - 0x00610300: 0x00E0, - 0x00610301: 0x00E1, - 0x00610302: 0x00E2, - 0x00610303: 0x00E3, - 0x00610308: 0x00E4, - 0x0061030A: 0x00E5, - 0x00630327: 0x00E7, - 0x00650300: 0x00E8, - 0x00650301: 0x00E9, - 0x00650302: 0x00EA, - 0x00650308: 0x00EB, - 0x00690300: 0x00EC, - 0x00690301: 0x00ED, - 0x00690302: 0x00EE, - 0x00690308: 0x00EF, - 0x006E0303: 0x00F1, - 0x006F0300: 0x00F2, - 0x006F0301: 0x00F3, - 0x006F0302: 0x00F4, - 0x006F0303: 0x00F5, - 0x006F0308: 0x00F6, - 0x00750300: 0x00F9, - 0x00750301: 0x00FA, - 0x00750302: 0x00FB, - 0x00750308: 0x00FC, - 0x00790301: 0x00FD, - 0x00790308: 0x00FF, - 0x00410304: 0x0100, - 0x00610304: 0x0101, - 0x00410306: 0x0102, - 0x00610306: 0x0103, - 0x00410328: 0x0104, - 0x00610328: 0x0105, - 0x00430301: 0x0106, - 0x00630301: 0x0107, - 0x00430302: 0x0108, - 0x00630302: 0x0109, - 0x00430307: 0x010A, - 0x00630307: 0x010B, - 0x0043030C: 0x010C, - 0x0063030C: 0x010D, - 0x0044030C: 0x010E, - 0x0064030C: 0x010F, - 0x00450304: 0x0112, - 0x00650304: 0x0113, - 0x00450306: 0x0114, - 0x00650306: 0x0115, - 0x00450307: 0x0116, - 0x00650307: 0x0117, - 0x00450328: 0x0118, - 0x00650328: 0x0119, - 0x0045030C: 0x011A, - 0x0065030C: 0x011B, - 0x00470302: 0x011C, - 0x00670302: 0x011D, - 0x00470306: 0x011E, - 0x00670306: 0x011F, - 0x00470307: 0x0120, - 0x00670307: 0x0121, - 0x00470327: 0x0122, - 0x00670327: 0x0123, - 0x00480302: 0x0124, - 0x00680302: 0x0125, - 0x00490303: 0x0128, - 0x00690303: 0x0129, - 0x00490304: 0x012A, - 0x00690304: 0x012B, - 0x00490306: 0x012C, - 0x00690306: 
0x012D, - 0x00490328: 0x012E, - 0x00690328: 0x012F, - 0x00490307: 0x0130, - 0x004A0302: 0x0134, - 0x006A0302: 0x0135, - 0x004B0327: 0x0136, - 0x006B0327: 0x0137, - 0x004C0301: 0x0139, - 0x006C0301: 0x013A, - 0x004C0327: 0x013B, - 0x006C0327: 0x013C, - 0x004C030C: 0x013D, - 0x006C030C: 0x013E, - 0x004E0301: 0x0143, - 0x006E0301: 0x0144, - 0x004E0327: 0x0145, - 0x006E0327: 0x0146, - 0x004E030C: 0x0147, - 0x006E030C: 0x0148, - 0x004F0304: 0x014C, - 0x006F0304: 0x014D, - 0x004F0306: 0x014E, - 0x006F0306: 0x014F, - 0x004F030B: 0x0150, - 0x006F030B: 0x0151, - 0x00520301: 0x0154, - 0x00720301: 0x0155, - 0x00520327: 0x0156, - 0x00720327: 0x0157, - 0x0052030C: 0x0158, - 0x0072030C: 0x0159, - 0x00530301: 0x015A, - 0x00730301: 0x015B, - 0x00530302: 0x015C, - 0x00730302: 0x015D, - 0x00530327: 0x015E, - 0x00730327: 0x015F, - 0x0053030C: 0x0160, - 0x0073030C: 0x0161, - 0x00540327: 0x0162, - 0x00740327: 0x0163, - 0x0054030C: 0x0164, - 0x0074030C: 0x0165, - 0x00550303: 0x0168, - 0x00750303: 0x0169, - 0x00550304: 0x016A, - 0x00750304: 0x016B, - 0x00550306: 0x016C, - 0x00750306: 0x016D, - 0x0055030A: 0x016E, - 0x0075030A: 0x016F, - 0x0055030B: 0x0170, - 0x0075030B: 0x0171, - 0x00550328: 0x0172, - 0x00750328: 0x0173, - 0x00570302: 0x0174, - 0x00770302: 0x0175, - 0x00590302: 0x0176, - 0x00790302: 0x0177, - 0x00590308: 0x0178, - 0x005A0301: 0x0179, - 0x007A0301: 0x017A, - 0x005A0307: 0x017B, - 0x007A0307: 0x017C, - 0x005A030C: 0x017D, - 0x007A030C: 0x017E, - 0x004F031B: 0x01A0, - 0x006F031B: 0x01A1, - 0x0055031B: 0x01AF, - 0x0075031B: 0x01B0, - 0x0041030C: 0x01CD, - 0x0061030C: 0x01CE, - 0x0049030C: 0x01CF, - 0x0069030C: 0x01D0, - 0x004F030C: 0x01D1, - 0x006F030C: 0x01D2, - 0x0055030C: 0x01D3, - 0x0075030C: 0x01D4, - 0x00DC0304: 0x01D5, - 0x00FC0304: 0x01D6, - 0x00DC0301: 0x01D7, - 0x00FC0301: 0x01D8, - 0x00DC030C: 0x01D9, - 0x00FC030C: 0x01DA, - 0x00DC0300: 0x01DB, - 0x00FC0300: 0x01DC, - 0x00C40304: 0x01DE, - 0x00E40304: 0x01DF, - 0x02260304: 0x01E0, - 0x02270304: 0x01E1, - 0x00C60304: 0x01E2, - 0x00E60304: 0x01E3, - 0x0047030C: 0x01E6, - 0x0067030C: 0x01E7, - 0x004B030C: 0x01E8, - 0x006B030C: 0x01E9, - 0x004F0328: 0x01EA, - 0x006F0328: 0x01EB, - 0x01EA0304: 0x01EC, - 0x01EB0304: 0x01ED, - 0x01B7030C: 0x01EE, - 0x0292030C: 0x01EF, - 0x006A030C: 0x01F0, - 0x00470301: 0x01F4, - 0x00670301: 0x01F5, - 0x004E0300: 0x01F8, - 0x006E0300: 0x01F9, - 0x00C50301: 0x01FA, - 0x00E50301: 0x01FB, - 0x00C60301: 0x01FC, - 0x00E60301: 0x01FD, - 0x00D80301: 0x01FE, - 0x00F80301: 0x01FF, - 0x0041030F: 0x0200, - 0x0061030F: 0x0201, - 0x00410311: 0x0202, - 0x00610311: 0x0203, - 0x0045030F: 0x0204, - 0x0065030F: 0x0205, - 0x00450311: 0x0206, - 0x00650311: 0x0207, - 0x0049030F: 0x0208, - 0x0069030F: 0x0209, - 0x00490311: 0x020A, - 0x00690311: 0x020B, - 0x004F030F: 0x020C, - 0x006F030F: 0x020D, - 0x004F0311: 0x020E, - 0x006F0311: 0x020F, - 0x0052030F: 0x0210, - 0x0072030F: 0x0211, - 0x00520311: 0x0212, - 0x00720311: 0x0213, - 0x0055030F: 0x0214, - 0x0075030F: 0x0215, - 0x00550311: 0x0216, - 0x00750311: 0x0217, - 0x00530326: 0x0218, - 0x00730326: 0x0219, - 0x00540326: 0x021A, - 0x00740326: 0x021B, - 0x0048030C: 0x021E, - 0x0068030C: 0x021F, - 0x00410307: 0x0226, - 0x00610307: 0x0227, - 0x00450327: 0x0228, - 0x00650327: 0x0229, - 0x00D60304: 0x022A, - 0x00F60304: 0x022B, - 0x00D50304: 0x022C, - 0x00F50304: 0x022D, - 0x004F0307: 0x022E, - 0x006F0307: 0x022F, - 0x022E0304: 0x0230, - 0x022F0304: 0x0231, - 0x00590304: 0x0232, - 0x00790304: 0x0233, - 0x00A80301: 0x0385, - 0x03910301: 0x0386, - 0x03950301: 0x0388, - 0x03970301: 0x0389, - 
0x03990301: 0x038A, - 0x039F0301: 0x038C, - 0x03A50301: 0x038E, - 0x03A90301: 0x038F, - 0x03CA0301: 0x0390, - 0x03990308: 0x03AA, - 0x03A50308: 0x03AB, - 0x03B10301: 0x03AC, - 0x03B50301: 0x03AD, - 0x03B70301: 0x03AE, - 0x03B90301: 0x03AF, - 0x03CB0301: 0x03B0, - 0x03B90308: 0x03CA, - 0x03C50308: 0x03CB, - 0x03BF0301: 0x03CC, - 0x03C50301: 0x03CD, - 0x03C90301: 0x03CE, - 0x03D20301: 0x03D3, - 0x03D20308: 0x03D4, - 0x04150300: 0x0400, - 0x04150308: 0x0401, - 0x04130301: 0x0403, - 0x04060308: 0x0407, - 0x041A0301: 0x040C, - 0x04180300: 0x040D, - 0x04230306: 0x040E, - 0x04180306: 0x0419, - 0x04380306: 0x0439, - 0x04350300: 0x0450, - 0x04350308: 0x0451, - 0x04330301: 0x0453, - 0x04560308: 0x0457, - 0x043A0301: 0x045C, - 0x04380300: 0x045D, - 0x04430306: 0x045E, - 0x0474030F: 0x0476, - 0x0475030F: 0x0477, - 0x04160306: 0x04C1, - 0x04360306: 0x04C2, - 0x04100306: 0x04D0, - 0x04300306: 0x04D1, - 0x04100308: 0x04D2, - 0x04300308: 0x04D3, - 0x04150306: 0x04D6, - 0x04350306: 0x04D7, - 0x04D80308: 0x04DA, - 0x04D90308: 0x04DB, - 0x04160308: 0x04DC, - 0x04360308: 0x04DD, - 0x04170308: 0x04DE, - 0x04370308: 0x04DF, - 0x04180304: 0x04E2, - 0x04380304: 0x04E3, - 0x04180308: 0x04E4, - 0x04380308: 0x04E5, - 0x041E0308: 0x04E6, - 0x043E0308: 0x04E7, - 0x04E80308: 0x04EA, - 0x04E90308: 0x04EB, - 0x042D0308: 0x04EC, - 0x044D0308: 0x04ED, - 0x04230304: 0x04EE, - 0x04430304: 0x04EF, - 0x04230308: 0x04F0, - 0x04430308: 0x04F1, - 0x0423030B: 0x04F2, - 0x0443030B: 0x04F3, - 0x04270308: 0x04F4, - 0x04470308: 0x04F5, - 0x042B0308: 0x04F8, - 0x044B0308: 0x04F9, - 0x06270653: 0x0622, - 0x06270654: 0x0623, - 0x06480654: 0x0624, - 0x06270655: 0x0625, - 0x064A0654: 0x0626, - 0x06D50654: 0x06C0, - 0x06C10654: 0x06C2, - 0x06D20654: 0x06D3, - 0x0928093C: 0x0929, - 0x0930093C: 0x0931, - 0x0933093C: 0x0934, - 0x09C709BE: 0x09CB, - 0x09C709D7: 0x09CC, - 0x0B470B56: 0x0B48, - 0x0B470B3E: 0x0B4B, - 0x0B470B57: 0x0B4C, - 0x0B920BD7: 0x0B94, - 0x0BC60BBE: 0x0BCA, - 0x0BC70BBE: 0x0BCB, - 0x0BC60BD7: 0x0BCC, - 0x0C460C56: 0x0C48, - 0x0CBF0CD5: 0x0CC0, - 0x0CC60CD5: 0x0CC7, - 0x0CC60CD6: 0x0CC8, - 0x0CC60CC2: 0x0CCA, - 0x0CCA0CD5: 0x0CCB, - 0x0D460D3E: 0x0D4A, - 0x0D470D3E: 0x0D4B, - 0x0D460D57: 0x0D4C, - 0x0DD90DCA: 0x0DDA, - 0x0DD90DCF: 0x0DDC, - 0x0DDC0DCA: 0x0DDD, - 0x0DD90DDF: 0x0DDE, - 0x1025102E: 0x1026, - 0x1B051B35: 0x1B06, - 0x1B071B35: 0x1B08, - 0x1B091B35: 0x1B0A, - 0x1B0B1B35: 0x1B0C, - 0x1B0D1B35: 0x1B0E, - 0x1B111B35: 0x1B12, - 0x1B3A1B35: 0x1B3B, - 0x1B3C1B35: 0x1B3D, - 0x1B3E1B35: 0x1B40, - 0x1B3F1B35: 0x1B41, - 0x1B421B35: 0x1B43, - 0x00410325: 0x1E00, - 0x00610325: 0x1E01, - 0x00420307: 0x1E02, - 0x00620307: 0x1E03, - 0x00420323: 0x1E04, - 0x00620323: 0x1E05, - 0x00420331: 0x1E06, - 0x00620331: 0x1E07, - 0x00C70301: 0x1E08, - 0x00E70301: 0x1E09, - 0x00440307: 0x1E0A, - 0x00640307: 0x1E0B, - 0x00440323: 0x1E0C, - 0x00640323: 0x1E0D, - 0x00440331: 0x1E0E, - 0x00640331: 0x1E0F, - 0x00440327: 0x1E10, - 0x00640327: 0x1E11, - 0x0044032D: 0x1E12, - 0x0064032D: 0x1E13, - 0x01120300: 0x1E14, - 0x01130300: 0x1E15, - 0x01120301: 0x1E16, - 0x01130301: 0x1E17, - 0x0045032D: 0x1E18, - 0x0065032D: 0x1E19, - 0x00450330: 0x1E1A, - 0x00650330: 0x1E1B, - 0x02280306: 0x1E1C, - 0x02290306: 0x1E1D, - 0x00460307: 0x1E1E, - 0x00660307: 0x1E1F, - 0x00470304: 0x1E20, - 0x00670304: 0x1E21, - 0x00480307: 0x1E22, - 0x00680307: 0x1E23, - 0x00480323: 0x1E24, - 0x00680323: 0x1E25, - 0x00480308: 0x1E26, - 0x00680308: 0x1E27, - 0x00480327: 0x1E28, - 0x00680327: 0x1E29, - 0x0048032E: 0x1E2A, - 0x0068032E: 0x1E2B, - 0x00490330: 0x1E2C, - 0x00690330: 
0x1E2D, - 0x00CF0301: 0x1E2E, - 0x00EF0301: 0x1E2F, - 0x004B0301: 0x1E30, - 0x006B0301: 0x1E31, - 0x004B0323: 0x1E32, - 0x006B0323: 0x1E33, - 0x004B0331: 0x1E34, - 0x006B0331: 0x1E35, - 0x004C0323: 0x1E36, - 0x006C0323: 0x1E37, - 0x1E360304: 0x1E38, - 0x1E370304: 0x1E39, - 0x004C0331: 0x1E3A, - 0x006C0331: 0x1E3B, - 0x004C032D: 0x1E3C, - 0x006C032D: 0x1E3D, - 0x004D0301: 0x1E3E, - 0x006D0301: 0x1E3F, - 0x004D0307: 0x1E40, - 0x006D0307: 0x1E41, - 0x004D0323: 0x1E42, - 0x006D0323: 0x1E43, - 0x004E0307: 0x1E44, - 0x006E0307: 0x1E45, - 0x004E0323: 0x1E46, - 0x006E0323: 0x1E47, - 0x004E0331: 0x1E48, - 0x006E0331: 0x1E49, - 0x004E032D: 0x1E4A, - 0x006E032D: 0x1E4B, - 0x00D50301: 0x1E4C, - 0x00F50301: 0x1E4D, - 0x00D50308: 0x1E4E, - 0x00F50308: 0x1E4F, - 0x014C0300: 0x1E50, - 0x014D0300: 0x1E51, - 0x014C0301: 0x1E52, - 0x014D0301: 0x1E53, - 0x00500301: 0x1E54, - 0x00700301: 0x1E55, - 0x00500307: 0x1E56, - 0x00700307: 0x1E57, - 0x00520307: 0x1E58, - 0x00720307: 0x1E59, - 0x00520323: 0x1E5A, - 0x00720323: 0x1E5B, - 0x1E5A0304: 0x1E5C, - 0x1E5B0304: 0x1E5D, - 0x00520331: 0x1E5E, - 0x00720331: 0x1E5F, - 0x00530307: 0x1E60, - 0x00730307: 0x1E61, - 0x00530323: 0x1E62, - 0x00730323: 0x1E63, - 0x015A0307: 0x1E64, - 0x015B0307: 0x1E65, - 0x01600307: 0x1E66, - 0x01610307: 0x1E67, - 0x1E620307: 0x1E68, - 0x1E630307: 0x1E69, - 0x00540307: 0x1E6A, - 0x00740307: 0x1E6B, - 0x00540323: 0x1E6C, - 0x00740323: 0x1E6D, - 0x00540331: 0x1E6E, - 0x00740331: 0x1E6F, - 0x0054032D: 0x1E70, - 0x0074032D: 0x1E71, - 0x00550324: 0x1E72, - 0x00750324: 0x1E73, - 0x00550330: 0x1E74, - 0x00750330: 0x1E75, - 0x0055032D: 0x1E76, - 0x0075032D: 0x1E77, - 0x01680301: 0x1E78, - 0x01690301: 0x1E79, - 0x016A0308: 0x1E7A, - 0x016B0308: 0x1E7B, - 0x00560303: 0x1E7C, - 0x00760303: 0x1E7D, - 0x00560323: 0x1E7E, - 0x00760323: 0x1E7F, - 0x00570300: 0x1E80, - 0x00770300: 0x1E81, - 0x00570301: 0x1E82, - 0x00770301: 0x1E83, - 0x00570308: 0x1E84, - 0x00770308: 0x1E85, - 0x00570307: 0x1E86, - 0x00770307: 0x1E87, - 0x00570323: 0x1E88, - 0x00770323: 0x1E89, - 0x00580307: 0x1E8A, - 0x00780307: 0x1E8B, - 0x00580308: 0x1E8C, - 0x00780308: 0x1E8D, - 0x00590307: 0x1E8E, - 0x00790307: 0x1E8F, - 0x005A0302: 0x1E90, - 0x007A0302: 0x1E91, - 0x005A0323: 0x1E92, - 0x007A0323: 0x1E93, - 0x005A0331: 0x1E94, - 0x007A0331: 0x1E95, - 0x00680331: 0x1E96, - 0x00740308: 0x1E97, - 0x0077030A: 0x1E98, - 0x0079030A: 0x1E99, - 0x017F0307: 0x1E9B, - 0x00410323: 0x1EA0, - 0x00610323: 0x1EA1, - 0x00410309: 0x1EA2, - 0x00610309: 0x1EA3, - 0x00C20301: 0x1EA4, - 0x00E20301: 0x1EA5, - 0x00C20300: 0x1EA6, - 0x00E20300: 0x1EA7, - 0x00C20309: 0x1EA8, - 0x00E20309: 0x1EA9, - 0x00C20303: 0x1EAA, - 0x00E20303: 0x1EAB, - 0x1EA00302: 0x1EAC, - 0x1EA10302: 0x1EAD, - 0x01020301: 0x1EAE, - 0x01030301: 0x1EAF, - 0x01020300: 0x1EB0, - 0x01030300: 0x1EB1, - 0x01020309: 0x1EB2, - 0x01030309: 0x1EB3, - 0x01020303: 0x1EB4, - 0x01030303: 0x1EB5, - 0x1EA00306: 0x1EB6, - 0x1EA10306: 0x1EB7, - 0x00450323: 0x1EB8, - 0x00650323: 0x1EB9, - 0x00450309: 0x1EBA, - 0x00650309: 0x1EBB, - 0x00450303: 0x1EBC, - 0x00650303: 0x1EBD, - 0x00CA0301: 0x1EBE, - 0x00EA0301: 0x1EBF, - 0x00CA0300: 0x1EC0, - 0x00EA0300: 0x1EC1, - 0x00CA0309: 0x1EC2, - 0x00EA0309: 0x1EC3, - 0x00CA0303: 0x1EC4, - 0x00EA0303: 0x1EC5, - 0x1EB80302: 0x1EC6, - 0x1EB90302: 0x1EC7, - 0x00490309: 0x1EC8, - 0x00690309: 0x1EC9, - 0x00490323: 0x1ECA, - 0x00690323: 0x1ECB, - 0x004F0323: 0x1ECC, - 0x006F0323: 0x1ECD, - 0x004F0309: 0x1ECE, - 0x006F0309: 0x1ECF, - 0x00D40301: 0x1ED0, - 0x00F40301: 0x1ED1, - 0x00D40300: 0x1ED2, - 0x00F40300: 0x1ED3, - 
0x00D40309: 0x1ED4, - 0x00F40309: 0x1ED5, - 0x00D40303: 0x1ED6, - 0x00F40303: 0x1ED7, - 0x1ECC0302: 0x1ED8, - 0x1ECD0302: 0x1ED9, - 0x01A00301: 0x1EDA, - 0x01A10301: 0x1EDB, - 0x01A00300: 0x1EDC, - 0x01A10300: 0x1EDD, - 0x01A00309: 0x1EDE, - 0x01A10309: 0x1EDF, - 0x01A00303: 0x1EE0, - 0x01A10303: 0x1EE1, - 0x01A00323: 0x1EE2, - 0x01A10323: 0x1EE3, - 0x00550323: 0x1EE4, - 0x00750323: 0x1EE5, - 0x00550309: 0x1EE6, - 0x00750309: 0x1EE7, - 0x01AF0301: 0x1EE8, - 0x01B00301: 0x1EE9, - 0x01AF0300: 0x1EEA, - 0x01B00300: 0x1EEB, - 0x01AF0309: 0x1EEC, - 0x01B00309: 0x1EED, - 0x01AF0303: 0x1EEE, - 0x01B00303: 0x1EEF, - 0x01AF0323: 0x1EF0, - 0x01B00323: 0x1EF1, - 0x00590300: 0x1EF2, - 0x00790300: 0x1EF3, - 0x00590323: 0x1EF4, - 0x00790323: 0x1EF5, - 0x00590309: 0x1EF6, - 0x00790309: 0x1EF7, - 0x00590303: 0x1EF8, - 0x00790303: 0x1EF9, - 0x03B10313: 0x1F00, - 0x03B10314: 0x1F01, - 0x1F000300: 0x1F02, - 0x1F010300: 0x1F03, - 0x1F000301: 0x1F04, - 0x1F010301: 0x1F05, - 0x1F000342: 0x1F06, - 0x1F010342: 0x1F07, - 0x03910313: 0x1F08, - 0x03910314: 0x1F09, - 0x1F080300: 0x1F0A, - 0x1F090300: 0x1F0B, - 0x1F080301: 0x1F0C, - 0x1F090301: 0x1F0D, - 0x1F080342: 0x1F0E, - 0x1F090342: 0x1F0F, - 0x03B50313: 0x1F10, - 0x03B50314: 0x1F11, - 0x1F100300: 0x1F12, - 0x1F110300: 0x1F13, - 0x1F100301: 0x1F14, - 0x1F110301: 0x1F15, - 0x03950313: 0x1F18, - 0x03950314: 0x1F19, - 0x1F180300: 0x1F1A, - 0x1F190300: 0x1F1B, - 0x1F180301: 0x1F1C, - 0x1F190301: 0x1F1D, - 0x03B70313: 0x1F20, - 0x03B70314: 0x1F21, - 0x1F200300: 0x1F22, - 0x1F210300: 0x1F23, - 0x1F200301: 0x1F24, - 0x1F210301: 0x1F25, - 0x1F200342: 0x1F26, - 0x1F210342: 0x1F27, - 0x03970313: 0x1F28, - 0x03970314: 0x1F29, - 0x1F280300: 0x1F2A, - 0x1F290300: 0x1F2B, - 0x1F280301: 0x1F2C, - 0x1F290301: 0x1F2D, - 0x1F280342: 0x1F2E, - 0x1F290342: 0x1F2F, - 0x03B90313: 0x1F30, - 0x03B90314: 0x1F31, - 0x1F300300: 0x1F32, - 0x1F310300: 0x1F33, - 0x1F300301: 0x1F34, - 0x1F310301: 0x1F35, - 0x1F300342: 0x1F36, - 0x1F310342: 0x1F37, - 0x03990313: 0x1F38, - 0x03990314: 0x1F39, - 0x1F380300: 0x1F3A, - 0x1F390300: 0x1F3B, - 0x1F380301: 0x1F3C, - 0x1F390301: 0x1F3D, - 0x1F380342: 0x1F3E, - 0x1F390342: 0x1F3F, - 0x03BF0313: 0x1F40, - 0x03BF0314: 0x1F41, - 0x1F400300: 0x1F42, - 0x1F410300: 0x1F43, - 0x1F400301: 0x1F44, - 0x1F410301: 0x1F45, - 0x039F0313: 0x1F48, - 0x039F0314: 0x1F49, - 0x1F480300: 0x1F4A, - 0x1F490300: 0x1F4B, - 0x1F480301: 0x1F4C, - 0x1F490301: 0x1F4D, - 0x03C50313: 0x1F50, - 0x03C50314: 0x1F51, - 0x1F500300: 0x1F52, - 0x1F510300: 0x1F53, - 0x1F500301: 0x1F54, - 0x1F510301: 0x1F55, - 0x1F500342: 0x1F56, - 0x1F510342: 0x1F57, - 0x03A50314: 0x1F59, - 0x1F590300: 0x1F5B, - 0x1F590301: 0x1F5D, - 0x1F590342: 0x1F5F, - 0x03C90313: 0x1F60, - 0x03C90314: 0x1F61, - 0x1F600300: 0x1F62, - 0x1F610300: 0x1F63, - 0x1F600301: 0x1F64, - 0x1F610301: 0x1F65, - 0x1F600342: 0x1F66, - 0x1F610342: 0x1F67, - 0x03A90313: 0x1F68, - 0x03A90314: 0x1F69, - 0x1F680300: 0x1F6A, - 0x1F690300: 0x1F6B, - 0x1F680301: 0x1F6C, - 0x1F690301: 0x1F6D, - 0x1F680342: 0x1F6E, - 0x1F690342: 0x1F6F, - 0x03B10300: 0x1F70, - 0x03B50300: 0x1F72, - 0x03B70300: 0x1F74, - 0x03B90300: 0x1F76, - 0x03BF0300: 0x1F78, - 0x03C50300: 0x1F7A, - 0x03C90300: 0x1F7C, - 0x1F000345: 0x1F80, - 0x1F010345: 0x1F81, - 0x1F020345: 0x1F82, - 0x1F030345: 0x1F83, - 0x1F040345: 0x1F84, - 0x1F050345: 0x1F85, - 0x1F060345: 0x1F86, - 0x1F070345: 0x1F87, - 0x1F080345: 0x1F88, - 0x1F090345: 0x1F89, - 0x1F0A0345: 0x1F8A, - 0x1F0B0345: 0x1F8B, - 0x1F0C0345: 0x1F8C, - 0x1F0D0345: 0x1F8D, - 0x1F0E0345: 0x1F8E, - 0x1F0F0345: 0x1F8F, - 0x1F200345: 
0x1F90, - 0x1F210345: 0x1F91, - 0x1F220345: 0x1F92, - 0x1F230345: 0x1F93, - 0x1F240345: 0x1F94, - 0x1F250345: 0x1F95, - 0x1F260345: 0x1F96, - 0x1F270345: 0x1F97, - 0x1F280345: 0x1F98, - 0x1F290345: 0x1F99, - 0x1F2A0345: 0x1F9A, - 0x1F2B0345: 0x1F9B, - 0x1F2C0345: 0x1F9C, - 0x1F2D0345: 0x1F9D, - 0x1F2E0345: 0x1F9E, - 0x1F2F0345: 0x1F9F, - 0x1F600345: 0x1FA0, - 0x1F610345: 0x1FA1, - 0x1F620345: 0x1FA2, - 0x1F630345: 0x1FA3, - 0x1F640345: 0x1FA4, - 0x1F650345: 0x1FA5, - 0x1F660345: 0x1FA6, - 0x1F670345: 0x1FA7, - 0x1F680345: 0x1FA8, - 0x1F690345: 0x1FA9, - 0x1F6A0345: 0x1FAA, - 0x1F6B0345: 0x1FAB, - 0x1F6C0345: 0x1FAC, - 0x1F6D0345: 0x1FAD, - 0x1F6E0345: 0x1FAE, - 0x1F6F0345: 0x1FAF, - 0x03B10306: 0x1FB0, - 0x03B10304: 0x1FB1, - 0x1F700345: 0x1FB2, - 0x03B10345: 0x1FB3, - 0x03AC0345: 0x1FB4, - 0x03B10342: 0x1FB6, - 0x1FB60345: 0x1FB7, - 0x03910306: 0x1FB8, - 0x03910304: 0x1FB9, - 0x03910300: 0x1FBA, - 0x03910345: 0x1FBC, - 0x00A80342: 0x1FC1, - 0x1F740345: 0x1FC2, - 0x03B70345: 0x1FC3, - 0x03AE0345: 0x1FC4, - 0x03B70342: 0x1FC6, - 0x1FC60345: 0x1FC7, - 0x03950300: 0x1FC8, - 0x03970300: 0x1FCA, - 0x03970345: 0x1FCC, - 0x1FBF0300: 0x1FCD, - 0x1FBF0301: 0x1FCE, - 0x1FBF0342: 0x1FCF, - 0x03B90306: 0x1FD0, - 0x03B90304: 0x1FD1, - 0x03CA0300: 0x1FD2, - 0x03B90342: 0x1FD6, - 0x03CA0342: 0x1FD7, - 0x03990306: 0x1FD8, - 0x03990304: 0x1FD9, - 0x03990300: 0x1FDA, - 0x1FFE0300: 0x1FDD, - 0x1FFE0301: 0x1FDE, - 0x1FFE0342: 0x1FDF, - 0x03C50306: 0x1FE0, - 0x03C50304: 0x1FE1, - 0x03CB0300: 0x1FE2, - 0x03C10313: 0x1FE4, - 0x03C10314: 0x1FE5, - 0x03C50342: 0x1FE6, - 0x03CB0342: 0x1FE7, - 0x03A50306: 0x1FE8, - 0x03A50304: 0x1FE9, - 0x03A50300: 0x1FEA, - 0x03A10314: 0x1FEC, - 0x00A80300: 0x1FED, - 0x1F7C0345: 0x1FF2, - 0x03C90345: 0x1FF3, - 0x03CE0345: 0x1FF4, - 0x03C90342: 0x1FF6, - 0x1FF60345: 0x1FF7, - 0x039F0300: 0x1FF8, - 0x03A90300: 0x1FFA, - 0x03A90345: 0x1FFC, - 0x21900338: 0x219A, - 0x21920338: 0x219B, - 0x21940338: 0x21AE, - 0x21D00338: 0x21CD, - 0x21D40338: 0x21CE, - 0x21D20338: 0x21CF, - 0x22030338: 0x2204, - 0x22080338: 0x2209, - 0x220B0338: 0x220C, - 0x22230338: 0x2224, - 0x22250338: 0x2226, - 0x223C0338: 0x2241, - 0x22430338: 0x2244, - 0x22450338: 0x2247, - 0x22480338: 0x2249, - 0x003D0338: 0x2260, - 0x22610338: 0x2262, - 0x224D0338: 0x226D, - 0x003C0338: 0x226E, - 0x003E0338: 0x226F, - 0x22640338: 0x2270, - 0x22650338: 0x2271, - 0x22720338: 0x2274, - 0x22730338: 0x2275, - 0x22760338: 0x2278, - 0x22770338: 0x2279, - 0x227A0338: 0x2280, - 0x227B0338: 0x2281, - 0x22820338: 0x2284, - 0x22830338: 0x2285, - 0x22860338: 0x2288, - 0x22870338: 0x2289, - 0x22A20338: 0x22AC, - 0x22A80338: 0x22AD, - 0x22A90338: 0x22AE, - 0x22AB0338: 0x22AF, - 0x227C0338: 0x22E0, - 0x227D0338: 0x22E1, - 0x22910338: 0x22E2, - 0x22920338: 0x22E3, - 0x22B20338: 0x22EA, - 0x22B30338: 0x22EB, - 0x22B40338: 0x22EC, - 0x22B50338: 0x22ED, - 0x304B3099: 0x304C, - 0x304D3099: 0x304E, - 0x304F3099: 0x3050, - 0x30513099: 0x3052, - 0x30533099: 0x3054, - 0x30553099: 0x3056, - 0x30573099: 0x3058, - 0x30593099: 0x305A, - 0x305B3099: 0x305C, - 0x305D3099: 0x305E, - 0x305F3099: 0x3060, - 0x30613099: 0x3062, - 0x30643099: 0x3065, - 0x30663099: 0x3067, - 0x30683099: 0x3069, - 0x306F3099: 0x3070, - 0x306F309A: 0x3071, - 0x30723099: 0x3073, - 0x3072309A: 0x3074, - 0x30753099: 0x3076, - 0x3075309A: 0x3077, - 0x30783099: 0x3079, - 0x3078309A: 0x307A, - 0x307B3099: 0x307C, - 0x307B309A: 0x307D, - 0x30463099: 0x3094, - 0x309D3099: 0x309E, - 0x30AB3099: 0x30AC, - 0x30AD3099: 0x30AE, - 0x30AF3099: 0x30B0, - 0x30B13099: 0x30B2, - 0x30B33099: 0x30B4, - 
0x30B53099: 0x30B6, - 0x30B73099: 0x30B8, - 0x30B93099: 0x30BA, - 0x30BB3099: 0x30BC, - 0x30BD3099: 0x30BE, - 0x30BF3099: 0x30C0, - 0x30C13099: 0x30C2, - 0x30C43099: 0x30C5, - 0x30C63099: 0x30C7, - 0x30C83099: 0x30C9, - 0x30CF3099: 0x30D0, - 0x30CF309A: 0x30D1, - 0x30D23099: 0x30D3, - 0x30D2309A: 0x30D4, - 0x30D53099: 0x30D6, - 0x30D5309A: 0x30D7, - 0x30D83099: 0x30D9, - 0x30D8309A: 0x30DA, - 0x30DB3099: 0x30DC, - 0x30DB309A: 0x30DD, - 0x30A63099: 0x30F4, - 0x30EF3099: 0x30F7, - 0x30F03099: 0x30F8, - 0x30F13099: 0x30F9, - 0x30F23099: 0x30FA, - 0x30FD3099: 0x30FE, - 0x109910BA: 0x1109A, - 0x109B10BA: 0x1109C, - 0x10A510BA: 0x110AB, - 0x11311127: 0x1112E, - 0x11321127: 0x1112F, - 0x1347133E: 0x1134B, - 0x13471357: 0x1134C, - 0x14B914BA: 0x114BB, - 0x14B914B0: 0x114BC, - 0x14B914BD: 0x114BE, - 0x15B815AF: 0x115BA, - 0x15B915AF: 0x115BB, -} +var recompMap map[uint32]rune +var recompMapOnce sync.Once -// Total size of tables: 53KB (54226 bytes) +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 
0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 53KB (54226 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go new file mode 100644 index 00000000..7297cce3 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -0,0 +1,7693 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. 
+ Version = "11.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. + MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2C9E + endMulti = 0x2F60 + firstLeadingCCC = 0x49AE + firstCCCZeroExcept = 0x4A78 + firstStarterWithNLead = 0x4A9F + lastDecomp = 0x4AA1 + maxDecomp = 0x8000 +) + +// decomps: 19105 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, 
+ 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 
0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 
0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 
0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, + // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes 
a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 
0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, + 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 
0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 
0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 
0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 
0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 
0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, + 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 
0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + // Bytes 1a80 - 1abf + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + // Bytes 1ac0 - 1aff + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + 0x43, 0x28, 
0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + // Bytes 1b00 - 1b3f + 0x43, 0x28, 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + // Bytes 1b40 - 1b7f + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + // Bytes 1b80 - 1bbf + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + // Bytes 1bc0 - 1bff + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + // Bytes 1c00 - 1c3f + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + // Bytes 1c40 - 1c7f + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + // Bytes 1c80 - 1cbf + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + // Bytes 1cc0 - 1cff + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 
0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + // Bytes 1d00 - 1d3f + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + // Bytes 1d40 - 1d7f + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + // Bytes 1d80 - 1dbf + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + // Bytes 1dc0 - 1dff + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + // Bytes 1e00 - 1e3f + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + // Bytes 1e40 - 1e7f + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1e80 - 1ebf + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + // Bytes 1ec0 - 1eff + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + // Bytes 1f00 - 1f3f + 0x44, 0xD8, 0xB9, 0xD8, 
0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + // Bytes 1f40 - 1f7f + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + // Bytes 1f80 - 1fbf + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + // Bytes 1fc0 - 1fff + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + // Bytes 2000 - 203f + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + // Bytes 2040 - 207f + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + // Bytes 2100 - 213f + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 
0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, + 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + // Bytes 21c0 - 21ff + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + // Bytes 2240 - 227f + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + // Bytes 2280 - 22bf + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + // Bytes 2300 - 233f + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 
0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + // Bytes 2340 - 237f + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + // Bytes 2380 - 23bf + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + // Bytes 23c0 - 23ff + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + // Bytes 2400 - 243f + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + // Bytes 2440 - 247f + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + // Bytes 2480 - 24bf + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + // Bytes 24c0 - 24ff + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2500 - 253f + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + // Bytes 2540 - 257f + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 
0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2580 - 25bf + 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + // Bytes 25c0 - 25ff + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + // Bytes 2640 - 267f + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + // Bytes 26c0 - 26ff + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, + // Bytes 2700 - 273f + 0xA3, 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, + 0x46, 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, + 0xE6, 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, + 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, + 0x80, 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + // Bytes 2740 - 277f + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x89, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 
0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, + // Bytes 2780 - 27bf + 0x48, 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x91, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x92, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, + 0xA7, 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, + // Bytes 27c0 - 27ff + 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, + 0x48, 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, + 0x84, 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, + 0xD9, 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, + 0xB9, 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, + // Bytes 2800 - 283f + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x49, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, + 0x80, 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, + // Bytes 2840 - 287f + 0x9D, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE5, 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, + 0xAC, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE7, 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, + // Bytes 2880 - 28bf + 0x49, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, + 0xE3, 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, + 0x82, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, + 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, + // Bytes 28c0 - 28ff + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, + 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, + 0x49, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, + // Bytes 2940 - 297f + 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, + 0x83, 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, + 0x49, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, + // Bytes 2980 - 29bf + 0xA2, 0xE3, 0x83, 
0xB3, 0x49, 0xE3, 0x83, 0xAF, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0x4C, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, + 0xA8, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, + // Bytes 29c0 - 29ff + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2a40 - 2a7f + 0xBC, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x84, 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xB3, 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, + 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, + // Bytes 2b00 - 2b3f + 0x84, 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, + 0xA7, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0x88, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 
0x83, 0xAB, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, + // Bytes 2bc0 - 2bff + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xBC, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xA9, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + 0x52, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, + 0xE3, 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, + // Bytes 2c80 - 2cbf + 0x84, 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, + 0xD9, 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, + 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2cc0 - 2cff + 0xAD, 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, + 0xAE, 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xB2, 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2d00 - 2d3f + 0xB5, 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB5, 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, + 0xB5, 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB7, 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, + 0x80, 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, + 0xAC, 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAD, 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, + // Bytes 2d80 - 2dbf + 0x91, 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, + 0x08, 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, + 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, + 0x91, 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, + 0x87, 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, + 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, + 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, + 0xBA, 
0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, + // Bytes 2dc0 - 2dff + 0x91, 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, + 0xB8, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, + 0x91, 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, + 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, + 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0xE0, 0xB7, 0x8A, 0x12, 0x44, 0x44, + 0x5A, 0xCC, 0x8C, 0xC9, 0x44, 0x44, 0x7A, 0xCC, + 0x8C, 0xC9, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xC9, + // Bytes 2e00 - 2e3f + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e40 - 2e7f + 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, + 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e80 - 2ebf + 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, + 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, + 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0x9B, 0xE3, 0x82, 0x9A, 0x0D, 0x4C, 0xE3, 0x83, + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, + // Bytes 2f00 - 2f3f + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x83, 0x95, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, + 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, + 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, + 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, + 0x41, 0xCC, 0x80, 0xC9, 0x03, 0x41, 0xCC, 0x81, + 0xC9, 0x03, 0x41, 0xCC, 0x83, 0xC9, 0x03, 0x41, + // Bytes 2f80 - 2fbf + 0xCC, 0x84, 0xC9, 0x03, 0x41, 0xCC, 0x89, 0xC9, + 0x03, 0x41, 0xCC, 0x8C, 0xC9, 0x03, 0x41, 0xCC, + 0x8F, 0xC9, 0x03, 0x41, 0xCC, 0x91, 0xC9, 0x03, + 0x41, 0xCC, 0xA5, 0xB5, 0x03, 0x41, 0xCC, 0xA8, + 0xA5, 0x03, 0x42, 0xCC, 0x87, 0xC9, 0x03, 0x42, + 0xCC, 0xA3, 0xB5, 0x03, 0x42, 0xCC, 0xB1, 0xB5, + 0x03, 0x43, 0xCC, 0x81, 0xC9, 0x03, 0x43, 0xCC, + 0x82, 0xC9, 0x03, 0x43, 0xCC, 0x87, 0xC9, 0x03, + // Bytes 2fc0 - 2fff + 0x43, 0xCC, 0x8C, 0xC9, 0x03, 0x44, 0xCC, 0x87, + 0xC9, 0x03, 0x44, 0xCC, 0x8C, 0xC9, 0x03, 0x44, + 
0xCC, 0xA3, 0xB5, 0x03, 0x44, 0xCC, 0xA7, 0xA5, + 0x03, 0x44, 0xCC, 0xAD, 0xB5, 0x03, 0x44, 0xCC, + 0xB1, 0xB5, 0x03, 0x45, 0xCC, 0x80, 0xC9, 0x03, + 0x45, 0xCC, 0x81, 0xC9, 0x03, 0x45, 0xCC, 0x83, + 0xC9, 0x03, 0x45, 0xCC, 0x86, 0xC9, 0x03, 0x45, + 0xCC, 0x87, 0xC9, 0x03, 0x45, 0xCC, 0x88, 0xC9, + // Bytes 3000 - 303f + 0x03, 0x45, 0xCC, 0x89, 0xC9, 0x03, 0x45, 0xCC, + 0x8C, 0xC9, 0x03, 0x45, 0xCC, 0x8F, 0xC9, 0x03, + 0x45, 0xCC, 0x91, 0xC9, 0x03, 0x45, 0xCC, 0xA8, + 0xA5, 0x03, 0x45, 0xCC, 0xAD, 0xB5, 0x03, 0x45, + 0xCC, 0xB0, 0xB5, 0x03, 0x46, 0xCC, 0x87, 0xC9, + 0x03, 0x47, 0xCC, 0x81, 0xC9, 0x03, 0x47, 0xCC, + 0x82, 0xC9, 0x03, 0x47, 0xCC, 0x84, 0xC9, 0x03, + 0x47, 0xCC, 0x86, 0xC9, 0x03, 0x47, 0xCC, 0x87, + // Bytes 3040 - 307f + 0xC9, 0x03, 0x47, 0xCC, 0x8C, 0xC9, 0x03, 0x47, + 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0x82, 0xC9, + 0x03, 0x48, 0xCC, 0x87, 0xC9, 0x03, 0x48, 0xCC, + 0x88, 0xC9, 0x03, 0x48, 0xCC, 0x8C, 0xC9, 0x03, + 0x48, 0xCC, 0xA3, 0xB5, 0x03, 0x48, 0xCC, 0xA7, + 0xA5, 0x03, 0x48, 0xCC, 0xAE, 0xB5, 0x03, 0x49, + 0xCC, 0x80, 0xC9, 0x03, 0x49, 0xCC, 0x81, 0xC9, + 0x03, 0x49, 0xCC, 0x82, 0xC9, 0x03, 0x49, 0xCC, + // Bytes 3080 - 30bf + 0x83, 0xC9, 0x03, 0x49, 0xCC, 0x84, 0xC9, 0x03, + 0x49, 0xCC, 0x86, 0xC9, 0x03, 0x49, 0xCC, 0x87, + 0xC9, 0x03, 0x49, 0xCC, 0x89, 0xC9, 0x03, 0x49, + 0xCC, 0x8C, 0xC9, 0x03, 0x49, 0xCC, 0x8F, 0xC9, + 0x03, 0x49, 0xCC, 0x91, 0xC9, 0x03, 0x49, 0xCC, + 0xA3, 0xB5, 0x03, 0x49, 0xCC, 0xA8, 0xA5, 0x03, + 0x49, 0xCC, 0xB0, 0xB5, 0x03, 0x4A, 0xCC, 0x82, + 0xC9, 0x03, 0x4B, 0xCC, 0x81, 0xC9, 0x03, 0x4B, + // Bytes 30c0 - 30ff + 0xCC, 0x8C, 0xC9, 0x03, 0x4B, 0xCC, 0xA3, 0xB5, + 0x03, 0x4B, 0xCC, 0xA7, 0xA5, 0x03, 0x4B, 0xCC, + 0xB1, 0xB5, 0x03, 0x4C, 0xCC, 0x81, 0xC9, 0x03, + 0x4C, 0xCC, 0x8C, 0xC9, 0x03, 0x4C, 0xCC, 0xA7, + 0xA5, 0x03, 0x4C, 0xCC, 0xAD, 0xB5, 0x03, 0x4C, + 0xCC, 0xB1, 0xB5, 0x03, 0x4D, 0xCC, 0x81, 0xC9, + 0x03, 0x4D, 0xCC, 0x87, 0xC9, 0x03, 0x4D, 0xCC, + 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0x80, 0xC9, 0x03, + // Bytes 3100 - 313f + 0x4E, 0xCC, 0x81, 0xC9, 0x03, 0x4E, 0xCC, 0x83, + 0xC9, 0x03, 0x4E, 0xCC, 0x87, 0xC9, 0x03, 0x4E, + 0xCC, 0x8C, 0xC9, 0x03, 0x4E, 0xCC, 0xA3, 0xB5, + 0x03, 0x4E, 0xCC, 0xA7, 0xA5, 0x03, 0x4E, 0xCC, + 0xAD, 0xB5, 0x03, 0x4E, 0xCC, 0xB1, 0xB5, 0x03, + 0x4F, 0xCC, 0x80, 0xC9, 0x03, 0x4F, 0xCC, 0x81, + 0xC9, 0x03, 0x4F, 0xCC, 0x86, 0xC9, 0x03, 0x4F, + 0xCC, 0x89, 0xC9, 0x03, 0x4F, 0xCC, 0x8B, 0xC9, + // Bytes 3140 - 317f + 0x03, 0x4F, 0xCC, 0x8C, 0xC9, 0x03, 0x4F, 0xCC, + 0x8F, 0xC9, 0x03, 0x4F, 0xCC, 0x91, 0xC9, 0x03, + 0x50, 0xCC, 0x81, 0xC9, 0x03, 0x50, 0xCC, 0x87, + 0xC9, 0x03, 0x52, 0xCC, 0x81, 0xC9, 0x03, 0x52, + 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x8C, 0xC9, + 0x03, 0x52, 0xCC, 0x8F, 0xC9, 0x03, 0x52, 0xCC, + 0x91, 0xC9, 0x03, 0x52, 0xCC, 0xA7, 0xA5, 0x03, + 0x52, 0xCC, 0xB1, 0xB5, 0x03, 0x53, 0xCC, 0x82, + // Bytes 3180 - 31bf + 0xC9, 0x03, 0x53, 0xCC, 0x87, 0xC9, 0x03, 0x53, + 0xCC, 0xA6, 0xB5, 0x03, 0x53, 0xCC, 0xA7, 0xA5, + 0x03, 0x54, 0xCC, 0x87, 0xC9, 0x03, 0x54, 0xCC, + 0x8C, 0xC9, 0x03, 0x54, 0xCC, 0xA3, 0xB5, 0x03, + 0x54, 0xCC, 0xA6, 0xB5, 0x03, 0x54, 0xCC, 0xA7, + 0xA5, 0x03, 0x54, 0xCC, 0xAD, 0xB5, 0x03, 0x54, + 0xCC, 0xB1, 0xB5, 0x03, 0x55, 0xCC, 0x80, 0xC9, + 0x03, 0x55, 0xCC, 0x81, 0xC9, 0x03, 0x55, 0xCC, + // Bytes 31c0 - 31ff + 0x82, 0xC9, 0x03, 0x55, 0xCC, 0x86, 0xC9, 0x03, + 0x55, 0xCC, 0x89, 0xC9, 0x03, 0x55, 0xCC, 0x8A, + 0xC9, 0x03, 0x55, 0xCC, 0x8B, 0xC9, 0x03, 0x55, + 0xCC, 0x8C, 0xC9, 0x03, 0x55, 0xCC, 0x8F, 0xC9, + 0x03, 0x55, 0xCC, 0x91, 0xC9, 0x03, 0x55, 0xCC, + 0xA3, 0xB5, 0x03, 
0x55, 0xCC, 0xA4, 0xB5, 0x03, + 0x55, 0xCC, 0xA8, 0xA5, 0x03, 0x55, 0xCC, 0xAD, + 0xB5, 0x03, 0x55, 0xCC, 0xB0, 0xB5, 0x03, 0x56, + // Bytes 3200 - 323f + 0xCC, 0x83, 0xC9, 0x03, 0x56, 0xCC, 0xA3, 0xB5, + 0x03, 0x57, 0xCC, 0x80, 0xC9, 0x03, 0x57, 0xCC, + 0x81, 0xC9, 0x03, 0x57, 0xCC, 0x82, 0xC9, 0x03, + 0x57, 0xCC, 0x87, 0xC9, 0x03, 0x57, 0xCC, 0x88, + 0xC9, 0x03, 0x57, 0xCC, 0xA3, 0xB5, 0x03, 0x58, + 0xCC, 0x87, 0xC9, 0x03, 0x58, 0xCC, 0x88, 0xC9, + 0x03, 0x59, 0xCC, 0x80, 0xC9, 0x03, 0x59, 0xCC, + 0x81, 0xC9, 0x03, 0x59, 0xCC, 0x82, 0xC9, 0x03, + // Bytes 3240 - 327f + 0x59, 0xCC, 0x83, 0xC9, 0x03, 0x59, 0xCC, 0x84, + 0xC9, 0x03, 0x59, 0xCC, 0x87, 0xC9, 0x03, 0x59, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x89, 0xC9, + 0x03, 0x59, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, 0xCC, + 0x81, 0xC9, 0x03, 0x5A, 0xCC, 0x82, 0xC9, 0x03, + 0x5A, 0xCC, 0x87, 0xC9, 0x03, 0x5A, 0xCC, 0x8C, + 0xC9, 0x03, 0x5A, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, + 0xCC, 0xB1, 0xB5, 0x03, 0x61, 0xCC, 0x80, 0xC9, + // Bytes 3280 - 32bf + 0x03, 0x61, 0xCC, 0x81, 0xC9, 0x03, 0x61, 0xCC, + 0x83, 0xC9, 0x03, 0x61, 0xCC, 0x84, 0xC9, 0x03, + 0x61, 0xCC, 0x89, 0xC9, 0x03, 0x61, 0xCC, 0x8C, + 0xC9, 0x03, 0x61, 0xCC, 0x8F, 0xC9, 0x03, 0x61, + 0xCC, 0x91, 0xC9, 0x03, 0x61, 0xCC, 0xA5, 0xB5, + 0x03, 0x61, 0xCC, 0xA8, 0xA5, 0x03, 0x62, 0xCC, + 0x87, 0xC9, 0x03, 0x62, 0xCC, 0xA3, 0xB5, 0x03, + 0x62, 0xCC, 0xB1, 0xB5, 0x03, 0x63, 0xCC, 0x81, + // Bytes 32c0 - 32ff + 0xC9, 0x03, 0x63, 0xCC, 0x82, 0xC9, 0x03, 0x63, + 0xCC, 0x87, 0xC9, 0x03, 0x63, 0xCC, 0x8C, 0xC9, + 0x03, 0x64, 0xCC, 0x87, 0xC9, 0x03, 0x64, 0xCC, + 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0xA3, 0xB5, 0x03, + 0x64, 0xCC, 0xA7, 0xA5, 0x03, 0x64, 0xCC, 0xAD, + 0xB5, 0x03, 0x64, 0xCC, 0xB1, 0xB5, 0x03, 0x65, + 0xCC, 0x80, 0xC9, 0x03, 0x65, 0xCC, 0x81, 0xC9, + 0x03, 0x65, 0xCC, 0x83, 0xC9, 0x03, 0x65, 0xCC, + // Bytes 3300 - 333f + 0x86, 0xC9, 0x03, 0x65, 0xCC, 0x87, 0xC9, 0x03, + 0x65, 0xCC, 0x88, 0xC9, 0x03, 0x65, 0xCC, 0x89, + 0xC9, 0x03, 0x65, 0xCC, 0x8C, 0xC9, 0x03, 0x65, + 0xCC, 0x8F, 0xC9, 0x03, 0x65, 0xCC, 0x91, 0xC9, + 0x03, 0x65, 0xCC, 0xA8, 0xA5, 0x03, 0x65, 0xCC, + 0xAD, 0xB5, 0x03, 0x65, 0xCC, 0xB0, 0xB5, 0x03, + 0x66, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, 0x81, + 0xC9, 0x03, 0x67, 0xCC, 0x82, 0xC9, 0x03, 0x67, + // Bytes 3340 - 337f + 0xCC, 0x84, 0xC9, 0x03, 0x67, 0xCC, 0x86, 0xC9, + 0x03, 0x67, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, + 0x8C, 0xC9, 0x03, 0x67, 0xCC, 0xA7, 0xA5, 0x03, + 0x68, 0xCC, 0x82, 0xC9, 0x03, 0x68, 0xCC, 0x87, + 0xC9, 0x03, 0x68, 0xCC, 0x88, 0xC9, 0x03, 0x68, + 0xCC, 0x8C, 0xC9, 0x03, 0x68, 0xCC, 0xA3, 0xB5, + 0x03, 0x68, 0xCC, 0xA7, 0xA5, 0x03, 0x68, 0xCC, + 0xAE, 0xB5, 0x03, 0x68, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3380 - 33bf + 0x69, 0xCC, 0x80, 0xC9, 0x03, 0x69, 0xCC, 0x81, + 0xC9, 0x03, 0x69, 0xCC, 0x82, 0xC9, 0x03, 0x69, + 0xCC, 0x83, 0xC9, 0x03, 0x69, 0xCC, 0x84, 0xC9, + 0x03, 0x69, 0xCC, 0x86, 0xC9, 0x03, 0x69, 0xCC, + 0x89, 0xC9, 0x03, 0x69, 0xCC, 0x8C, 0xC9, 0x03, + 0x69, 0xCC, 0x8F, 0xC9, 0x03, 0x69, 0xCC, 0x91, + 0xC9, 0x03, 0x69, 0xCC, 0xA3, 0xB5, 0x03, 0x69, + 0xCC, 0xA8, 0xA5, 0x03, 0x69, 0xCC, 0xB0, 0xB5, + // Bytes 33c0 - 33ff + 0x03, 0x6A, 0xCC, 0x82, 0xC9, 0x03, 0x6A, 0xCC, + 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0x81, 0xC9, 0x03, + 0x6B, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0xA3, + 0xB5, 0x03, 0x6B, 0xCC, 0xA7, 0xA5, 0x03, 0x6B, + 0xCC, 0xB1, 0xB5, 0x03, 0x6C, 0xCC, 0x81, 0xC9, + 0x03, 0x6C, 0xCC, 0x8C, 0xC9, 0x03, 0x6C, 0xCC, + 0xA7, 0xA5, 0x03, 0x6C, 0xCC, 0xAD, 0xB5, 0x03, + 0x6C, 0xCC, 0xB1, 0xB5, 0x03, 0x6D, 0xCC, 0x81, + // Bytes 3400 - 343f + 0xC9, 0x03, 
0x6D, 0xCC, 0x87, 0xC9, 0x03, 0x6D, + 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0x80, 0xC9, + 0x03, 0x6E, 0xCC, 0x81, 0xC9, 0x03, 0x6E, 0xCC, + 0x83, 0xC9, 0x03, 0x6E, 0xCC, 0x87, 0xC9, 0x03, + 0x6E, 0xCC, 0x8C, 0xC9, 0x03, 0x6E, 0xCC, 0xA3, + 0xB5, 0x03, 0x6E, 0xCC, 0xA7, 0xA5, 0x03, 0x6E, + 0xCC, 0xAD, 0xB5, 0x03, 0x6E, 0xCC, 0xB1, 0xB5, + 0x03, 0x6F, 0xCC, 0x80, 0xC9, 0x03, 0x6F, 0xCC, + // Bytes 3440 - 347f + 0x81, 0xC9, 0x03, 0x6F, 0xCC, 0x86, 0xC9, 0x03, + 0x6F, 0xCC, 0x89, 0xC9, 0x03, 0x6F, 0xCC, 0x8B, + 0xC9, 0x03, 0x6F, 0xCC, 0x8C, 0xC9, 0x03, 0x6F, + 0xCC, 0x8F, 0xC9, 0x03, 0x6F, 0xCC, 0x91, 0xC9, + 0x03, 0x70, 0xCC, 0x81, 0xC9, 0x03, 0x70, 0xCC, + 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x81, 0xC9, 0x03, + 0x72, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x8C, + 0xC9, 0x03, 0x72, 0xCC, 0x8F, 0xC9, 0x03, 0x72, + // Bytes 3480 - 34bf + 0xCC, 0x91, 0xC9, 0x03, 0x72, 0xCC, 0xA7, 0xA5, + 0x03, 0x72, 0xCC, 0xB1, 0xB5, 0x03, 0x73, 0xCC, + 0x82, 0xC9, 0x03, 0x73, 0xCC, 0x87, 0xC9, 0x03, + 0x73, 0xCC, 0xA6, 0xB5, 0x03, 0x73, 0xCC, 0xA7, + 0xA5, 0x03, 0x74, 0xCC, 0x87, 0xC9, 0x03, 0x74, + 0xCC, 0x88, 0xC9, 0x03, 0x74, 0xCC, 0x8C, 0xC9, + 0x03, 0x74, 0xCC, 0xA3, 0xB5, 0x03, 0x74, 0xCC, + 0xA6, 0xB5, 0x03, 0x74, 0xCC, 0xA7, 0xA5, 0x03, + // Bytes 34c0 - 34ff + 0x74, 0xCC, 0xAD, 0xB5, 0x03, 0x74, 0xCC, 0xB1, + 0xB5, 0x03, 0x75, 0xCC, 0x80, 0xC9, 0x03, 0x75, + 0xCC, 0x81, 0xC9, 0x03, 0x75, 0xCC, 0x82, 0xC9, + 0x03, 0x75, 0xCC, 0x86, 0xC9, 0x03, 0x75, 0xCC, + 0x89, 0xC9, 0x03, 0x75, 0xCC, 0x8A, 0xC9, 0x03, + 0x75, 0xCC, 0x8B, 0xC9, 0x03, 0x75, 0xCC, 0x8C, + 0xC9, 0x03, 0x75, 0xCC, 0x8F, 0xC9, 0x03, 0x75, + 0xCC, 0x91, 0xC9, 0x03, 0x75, 0xCC, 0xA3, 0xB5, + // Bytes 3500 - 353f + 0x03, 0x75, 0xCC, 0xA4, 0xB5, 0x03, 0x75, 0xCC, + 0xA8, 0xA5, 0x03, 0x75, 0xCC, 0xAD, 0xB5, 0x03, + 0x75, 0xCC, 0xB0, 0xB5, 0x03, 0x76, 0xCC, 0x83, + 0xC9, 0x03, 0x76, 0xCC, 0xA3, 0xB5, 0x03, 0x77, + 0xCC, 0x80, 0xC9, 0x03, 0x77, 0xCC, 0x81, 0xC9, + 0x03, 0x77, 0xCC, 0x82, 0xC9, 0x03, 0x77, 0xCC, + 0x87, 0xC9, 0x03, 0x77, 0xCC, 0x88, 0xC9, 0x03, + 0x77, 0xCC, 0x8A, 0xC9, 0x03, 0x77, 0xCC, 0xA3, + // Bytes 3540 - 357f + 0xB5, 0x03, 0x78, 0xCC, 0x87, 0xC9, 0x03, 0x78, + 0xCC, 0x88, 0xC9, 0x03, 0x79, 0xCC, 0x80, 0xC9, + 0x03, 0x79, 0xCC, 0x81, 0xC9, 0x03, 0x79, 0xCC, + 0x82, 0xC9, 0x03, 0x79, 0xCC, 0x83, 0xC9, 0x03, + 0x79, 0xCC, 0x84, 0xC9, 0x03, 0x79, 0xCC, 0x87, + 0xC9, 0x03, 0x79, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x89, 0xC9, 0x03, 0x79, 0xCC, 0x8A, 0xC9, + 0x03, 0x79, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, 0xCC, + // Bytes 3580 - 35bf + 0x81, 0xC9, 0x03, 0x7A, 0xCC, 0x82, 0xC9, 0x03, + 0x7A, 0xCC, 0x87, 0xC9, 0x03, 0x7A, 0xCC, 0x8C, + 0xC9, 0x03, 0x7A, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, + 0xCC, 0xB1, 0xB5, 0x04, 0xC2, 0xA8, 0xCC, 0x80, + 0xCA, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x04, + 0xC2, 0xA8, 0xCD, 0x82, 0xCA, 0x04, 0xC3, 0x86, + 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0x86, 0xCC, 0x84, + 0xC9, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xC9, 0x04, + // Bytes 35c0 - 35ff + 0xC3, 0xA6, 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0xA6, + 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0xB8, 0xCC, 0x81, + 0xC9, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xC9, 0x04, + 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, 0x04, 0xCA, 0x92, + 0xCC, 0x8C, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0x91, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x91, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0x91, 0xCD, 0x85, + // Bytes 3600 - 363f + 0xD9, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x97, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0x97, 0xCD, 
0x85, 0xD9, 0x04, + 0xCE, 0x99, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x99, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0x9F, + // Bytes 3640 - 367f + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x9F, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA5, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0xA9, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xD9, 0x04, + // Bytes 3680 - 36bf + 0xCE, 0xB1, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB1, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB1, 0xCD, 0x85, + 0xD9, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xB7, + 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB9, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0xB9, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB9, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB9, 0xCD, 0x82, + // Bytes 36c0 - 36ff + 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x81, + 0xCC, 0x93, 0xC9, 0x04, 0xCF, 0x81, 0xCC, 0x94, + 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xC9, 0x04, + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x85, + 0xCC, 0x84, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x86, + 0xC9, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xC9, 0x04, + 0xCF, 0x89, 0xCD, 0x85, 0xD9, 0x04, 0xCF, 0x92, + // Bytes 3700 - 373f + 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x92, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0x90, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x90, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x93, 0xCC, 0x81, + 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0x95, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x95, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3740 - 377f + 0xD0, 0x97, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x98, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x84, + 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xC9, 0x04, + 0xD0, 0x98, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x9A, + 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x9E, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xC9, 0x04, + 0xD0, 0xA3, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xA3, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, + // Bytes 3780 - 37bf + 0xC9, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xAB, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xAD, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB3, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0xB5, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB6, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB6, + // Bytes 37c0 - 37ff + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB7, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0xB8, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0xB8, + 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xC9, 0x04, + 0xD0, 0xBE, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x83, + 0xCC, 0x84, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x86, + 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3800 - 383f + 0xD1, 0x83, 0xCC, 0x8B, 0xC9, 0x04, 0xD1, 0x87, + 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8B, 0xCC, 0x88, + 0xC9, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xC9, 0x04, + 0xD1, 0x96, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0xB4, + 0xCC, 0x8F, 0xC9, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, + 0xC9, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xC9, 0x04, + 0xD3, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA8, + 
0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA9, 0xCC, 0x88, + // Bytes 3840 - 387f + 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x04, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x04, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x04, 0xD9, 0x88, 0xD9, 0x94, + 0xC9, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x04, + 0xDB, 0x81, 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x92, + 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x95, 0xD9, 0x94, + 0xC9, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3880 - 38bf + 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x83, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, + 0xCA, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCA, + 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, + 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x41, + // Bytes 38c0 - 38ff + 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x43, 0xCC, 0xA7, + 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, + // Bytes 3900 - 393f + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x45, 0xCC, 0xA7, + 0xCC, 0x86, 0xCA, 0x05, 0x49, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x83, + // Bytes 3940 - 397f + 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x88, 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + // Bytes 3980 - 39bf + 0x89, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xB6, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, + 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x53, + 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, + 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, 0xA3, + 0xCC, 0x87, 0xCA, 0x05, 0x55, 0xCC, 0x83, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, + // Bytes 39c0 - 39ff + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + // Bytes 3a00 - 3a3f + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x81, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, + 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCA, + 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + // Bytes 3a40 - 3a7f + 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x61, + 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, 
+ 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x86, 0xCA, 0x05, 0x63, 0xCC, 0xA7, 0xCC, + 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, + 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCA, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x65, + // Bytes 3a80 - 3abf + 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x65, 0xCC, 0xA7, 0xCC, + 0x86, 0xCA, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + // Bytes 3ac0 - 3aff + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, + 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, 0x6F, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + // Bytes 3b00 - 3b3f + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, 0x72, + 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x73, 0xCC, + 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0x8C, + 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0xA3, 0xCC, + // Bytes 3b40 - 3b7f + 0x87, 0xCA, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, + // Bytes 3b80 - 3bbf + 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCA, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBE, + 0xBF, 0xCD, 0x82, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x80, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + 0x81, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, + 0xCA, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, + // Bytes 3bc0 - 3bff + 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c00 - 3c3f + 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3c40 - 3c7f + 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 
0xBC, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3c80 - 3cbf + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, + // Bytes 3cc0 - 3cff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, + 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + // Bytes 3d00 - 3d3f + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3d40 - 3d7f + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + // Bytes 3d80 - 3dbf + 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + // Bytes 3dc0 - 3dff + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + // Bytes 3e00 - 3e3f + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3e40 - 3e7f + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + // Bytes 3e80 - 3ebf + 0x06, 0xCF, 
0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + // Bytes 3ec0 - 3eff + 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x85, + 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x11, + // Bytes 3f00 - 3f3f + 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f40 - 3f7f + 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f80 - 3fbf + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x0D, + // Bytes 3fc0 - 3fff + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4000 - 403f + 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4040 - 407f + 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4080 - 40bf + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 
0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 40c0 - 40ff + 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + // Bytes 4100 - 413f + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + // Bytes 4140 - 417f + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, + // Bytes 4180 - 41bf + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + // Bytes 41c0 - 41ff + 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + // Bytes 4200 - 423f + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, + // Bytes 4240 - 427f + 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, + 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, + 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x42, 0xC2, + 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xC9, 0x43, + 0x20, 0xCC, 0x83, 0xC9, 0x43, 0x20, 0xCC, 0x84, + 0xC9, 0x43, 0x20, 0xCC, 0x85, 0xC9, 0x43, 0x20, + 0xCC, 0x86, 0xC9, 0x43, 0x20, 0xCC, 0x87, 0xC9, + 0x43, 0x20, 0xCC, 0x88, 0xC9, 0x43, 0x20, 0xCC, + // Bytes 4280 - 42bf + 0x8A, 0xC9, 0x43, 0x20, 0xCC, 0x8B, 0xC9, 0x43, + 0x20, 0xCC, 0x93, 0xC9, 0x43, 0x20, 0xCC, 0x94, + 0xC9, 0x43, 0x20, 0xCC, 0xA7, 0xA5, 0x43, 0x20, + 0xCC, 0xA8, 0xA5, 0x43, 0x20, 0xCC, 0xB3, 0xB5, + 0x43, 0x20, 0xCD, 0x82, 0xC9, 0x43, 0x20, 0xCD, + 0x85, 0xD9, 0x43, 0x20, 0xD9, 0x8B, 0x59, 0x43, + 0x20, 0xD9, 0x8C, 0x5D, 0x43, 0x20, 0xD9, 0x8D, + 
0x61, 0x43, 0x20, 0xD9, 0x8E, 0x65, 0x43, 0x20, + // Bytes 42c0 - 42ff + 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D, + 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9, + 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43, + 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82, + 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, + 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x44, 0xCE, + 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, + // Bytes 4300 - 433f + 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, + 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, + // Bytes 4340 - 437f + 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, + 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, + 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, + // Bytes 4380 - 43bf + 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, + 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x9C, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, + // Bytes 43c0 - 43ff + 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, + 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, + 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, + // Bytes 4400 - 443f + 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, + 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, + 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, + 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, + 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, + 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, + 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, + // Bytes 4440 - 447f + 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9, + 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, + 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, + 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, + 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, + // Bytes 4480 - 44bf + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, + // Bytes 44c0 - 44ff + 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, + 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, 
+ 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, + 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, + 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, + 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, + // Bytes 4500 - 453f + 0xBC, 0xD7, 0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, + 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, + 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, + 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, + // Bytes 4600 - 463f + 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, + 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, + 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + // Bytes 4640 - 467f + 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, + 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, + 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, + 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + // Bytes 4680 - 46bf + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, + 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, + 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, + 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, + 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, + 0xCC, 0xA7, 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9, + 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, + 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 46c0 - 46ff + 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, + 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, + 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, + 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, + 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, + 0x4F, 0xCC, 0xA3, 
0xB5, 0x83, 0x4F, 0xCC, 0xA8, + 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, + 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, + // Bytes 4700 - 473f + 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, + 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, + 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, + 0xAD, 0x83, 0x61, 0xCC, 0x82, 0xC9, 0x83, 0x61, + 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, + 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, + 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, + 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, + // Bytes 4740 - 477f + 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, + 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, + 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, + 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, + 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, + 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, + 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, + 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + // Bytes 4780 - 47bf + 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, + 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, + 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, + 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, + 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, + 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, + // Bytes 47c0 - 47ff + 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, + // Bytes 4800 - 483f + 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, + 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, + 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, + 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, + // Bytes 4840 - 487f + 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, + 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, + 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, + 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, + 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, + // Bytes 4880 - 48bf + 0x93, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + // Bytes 48c0 - 48ff + 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + // Bytes 4900 - 493f + 0xA9, 0xCC, 
0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + // Bytes 4940 - 497f + 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + // Bytes 4980 - 49bf + 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, + 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, + 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85, + // Bytes 49c0 - 49ff + 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, + // Bytes 4a00 - 4a3f + 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, + // Bytes 4a40 - 4a7f + 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, + 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, + 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32, + 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3, + // Bytes 4a80 - 4abf + 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x26, 0x00, + 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10586 bytes (10.34 KiB). Checksum: dd926e82067bee11. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 
0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, + // Block 0x5, offset 0x140 + 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 
0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a1, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3721, 0x2c1: 0x372d, 0x2c3: 0x371b, + 0x2c6: 0xa000, 0x2c7: 0x3709, + 0x2cc: 0x375d, 0x2cd: 0x3745, 0x2ce: 0x376f, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3751, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d5, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3733, 0x302: 0x37b7, + 0x310: 0x370f, 0x311: 0x3793, + 0x312: 0x3715, 0x313: 0x3799, 0x316: 0x3727, 0x317: 0x37ab, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3829, 0x31b: 0x382f, 0x31c: 0x3739, 0x31d: 0x37bd, + 0x31e: 0x373f, 0x31f: 0x37c3, 0x322: 0x374b, 0x323: 0x37cf, + 0x324: 0x3757, 0x325: 0x37db, 0x326: 0x3763, 0x327: 0x37e7, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3835, 0x32b: 0x383b, 0x32c: 0x378d, 0x32d: 0x3811, 0x32e: 0x3769, 0x32f: 0x37ed, + 0x330: 0x3775, 0x331: 0x37f9, 0x332: 0x377b, 0x333: 0x37ff, 0x334: 0x3781, 0x335: 0x3805, + 0x338: 0x3787, 0x339: 0x380b, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812d, 0x3d4: 0x8132, 0x3d5: 0x8132, 0x3d6: 0x8132, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 
0x3db: 0x8132, 0x3dc: 0x8132, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x8132, 0x3e0: 0x8132, 0x3e1: 0x8132, 0x3e3: 0x812d, + 0x3e4: 0x8132, 0x3e5: 0x8132, 0x3e6: 0x812d, 0x3e7: 0x8132, 0x3e8: 0x8132, 0x3e9: 0x812d, + 0x3ea: 0x8132, 0x3eb: 0x8132, 0x3ec: 0x8132, 0x3ed: 0x812d, 0x3ee: 0x812d, 0x3ef: 0x812d, + 0x3f0: 0x8116, 0x3f1: 0x8117, 0x3f2: 0x8118, 0x3f3: 0x8132, 0x3f4: 0x8132, 0x3f5: 0x8132, + 0x3f6: 0x812d, 0x3f7: 0x8132, 0x3f8: 0x8132, 0x3f9: 0x812d, 0x3fa: 0x812d, 0x3fb: 0x8132, + 0x3fc: 0x8132, 0x3fd: 0x8132, 0x3fe: 0x8132, 0x3ff: 0x8132, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d26, 0x407: 0xa000, 0x408: 0x2d2e, 0x409: 0xa000, 0x40a: 0x2d36, 0x40b: 0xa000, + 0x40c: 0x2d3e, 0x40d: 0xa000, 0x40e: 0x2d46, 0x411: 0xa000, + 0x412: 0x2d4e, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d56, + 0x43c: 0xa000, 0x43d: 0x2d5e, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8132, 0x441: 0x8132, 0x442: 0x812d, 0x443: 0x8132, 0x444: 0x8132, 0x445: 0x8132, + 0x446: 0x8132, 0x447: 0x8132, 0x448: 0x8132, 0x449: 0x8132, 0x44a: 0x812d, 0x44b: 0x8132, + 0x44c: 0x8132, 0x44d: 0x8135, 0x44e: 0x812a, 0x44f: 0x812d, 0x450: 0x8129, 0x451: 0x8132, + 0x452: 0x8132, 0x453: 0x8132, 0x454: 0x8132, 0x455: 0x8132, 0x456: 0x8132, 0x457: 0x8132, + 0x458: 0x8132, 0x459: 0x8132, 0x45a: 0x8132, 0x45b: 0x8132, 0x45c: 0x8132, 0x45d: 0x8132, + 0x45e: 0x8132, 0x45f: 0x8132, 0x460: 0x8132, 0x461: 0x8132, 0x462: 0x8132, 0x463: 0x8132, + 0x464: 0x8132, 0x465: 0x8132, 0x466: 0x8132, 0x467: 0x8132, 0x468: 0x8132, 0x469: 0x8132, + 0x46a: 0x8132, 0x46b: 0x8132, 0x46c: 0x8132, 0x46d: 0x8132, 0x46e: 0x8132, 0x46f: 0x8132, + 0x470: 0x8132, 0x471: 0x8132, 0x472: 0x8132, 0x473: 0x8132, 0x474: 0x8132, 0x475: 0x8132, + 0x476: 0x8133, 0x477: 0x8131, 0x478: 0x8131, 0x479: 0x812d, 0x47b: 0x8132, + 0x47c: 0x8134, 0x47d: 0x812d, 0x47e: 0x8132, 0x47f: 0x812d, + // Block 0x12, offset 0x480 + 0x480: 0x2f97, 0x481: 0x32a3, 0x482: 0x2fa1, 0x483: 0x32ad, 0x484: 0x2fa6, 0x485: 0x32b2, + 0x486: 0x2fab, 0x487: 0x32b7, 0x488: 0x38cc, 0x489: 0x3a5b, 0x48a: 0x2fc4, 0x48b: 0x32d0, + 0x48c: 0x2fce, 0x48d: 0x32da, 0x48e: 0x2fdd, 0x48f: 0x32e9, 0x490: 0x2fd3, 0x491: 0x32df, + 0x492: 0x2fd8, 0x493: 0x32e4, 0x494: 0x38ef, 0x495: 0x3a7e, 0x496: 0x38f6, 0x497: 0x3a85, + 0x498: 0x3019, 0x499: 0x3325, 0x49a: 0x301e, 0x49b: 0x332a, 0x49c: 0x3904, 0x49d: 0x3a93, + 0x49e: 0x3023, 0x49f: 0x332f, 0x4a0: 0x3032, 0x4a1: 0x333e, 0x4a2: 0x3050, 0x4a3: 0x335c, + 0x4a4: 0x305f, 0x4a5: 0x336b, 0x4a6: 0x3055, 0x4a7: 0x3361, 0x4a8: 0x3064, 0x4a9: 0x3370, + 0x4aa: 0x3069, 0x4ab: 0x3375, 0x4ac: 0x30af, 0x4ad: 0x33bb, 0x4ae: 0x390b, 0x4af: 0x3a9a, + 0x4b0: 0x30b9, 0x4b1: 0x33ca, 0x4b2: 0x30c3, 0x4b3: 0x33d4, 0x4b4: 0x30cd, 0x4b5: 0x33de, + 0x4b6: 0x46c4, 0x4b7: 0x4755, 0x4b8: 0x3912, 0x4b9: 0x3aa1, 0x4ba: 0x30e6, 0x4bb: 0x33f7, + 0x4bc: 0x30e1, 0x4bd: 0x33f2, 0x4be: 0x30eb, 0x4bf: 0x33fc, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x30f0, 0x4c1: 0x3401, 0x4c2: 0x30f5, 0x4c3: 0x3406, 0x4c4: 0x3109, 0x4c5: 0x341a, + 0x4c6: 0x3113, 0x4c7: 0x3424, 0x4c8: 0x3122, 0x4c9: 0x3433, 0x4ca: 0x311d, 0x4cb: 0x342e, + 0x4cc: 0x3935, 0x4cd: 0x3ac4, 0x4ce: 0x3943, 0x4cf: 0x3ad2, 0x4d0: 0x394a, 0x4d1: 0x3ad9, + 0x4d2: 0x3951, 0x4d3: 0x3ae0, 0x4d4: 0x314f, 0x4d5: 0x3460, 0x4d6: 0x3154, 0x4d7: 0x3465, + 0x4d8: 0x315e, 0x4d9: 0x346f, 0x4da: 0x46f1, 0x4db: 0x4782, 0x4dc: 0x3997, 0x4dd: 0x3b26, + 0x4de: 0x3177, 0x4df: 0x3488, 0x4e0: 0x3181, 0x4e1: 0x3492, 0x4e2: 0x4700, 0x4e3: 0x4791, + 0x4e4: 0x399e, 0x4e5: 0x3b2d, 0x4e6: 0x39a5, 0x4e7: 0x3b34, 
0x4e8: 0x39ac, 0x4e9: 0x3b3b, + 0x4ea: 0x3190, 0x4eb: 0x34a1, 0x4ec: 0x319a, 0x4ed: 0x34b0, 0x4ee: 0x31ae, 0x4ef: 0x34c4, + 0x4f0: 0x31a9, 0x4f1: 0x34bf, 0x4f2: 0x31ea, 0x4f3: 0x3500, 0x4f4: 0x31f9, 0x4f5: 0x350f, + 0x4f6: 0x31f4, 0x4f7: 0x350a, 0x4f8: 0x39b3, 0x4f9: 0x3b42, 0x4fa: 0x39ba, 0x4fb: 0x3b49, + 0x4fc: 0x31fe, 0x4fd: 0x3514, 0x4fe: 0x3203, 0x4ff: 0x3519, + // Block 0x14, offset 0x500 + 0x500: 0x3208, 0x501: 0x351e, 0x502: 0x320d, 0x503: 0x3523, 0x504: 0x321c, 0x505: 0x3532, + 0x506: 0x3217, 0x507: 0x352d, 0x508: 0x3221, 0x509: 0x353c, 0x50a: 0x3226, 0x50b: 0x3541, + 0x50c: 0x322b, 0x50d: 0x3546, 0x50e: 0x3249, 0x50f: 0x3564, 0x510: 0x3262, 0x511: 0x3582, + 0x512: 0x3271, 0x513: 0x3591, 0x514: 0x3276, 0x515: 0x3596, 0x516: 0x337a, 0x517: 0x34a6, + 0x518: 0x3537, 0x519: 0x3573, 0x51b: 0x35d1, + 0x520: 0x46a1, 0x521: 0x4732, 0x522: 0x2f83, 0x523: 0x328f, + 0x524: 0x3878, 0x525: 0x3a07, 0x526: 0x3871, 0x527: 0x3a00, 0x528: 0x3886, 0x529: 0x3a15, + 0x52a: 0x387f, 0x52b: 0x3a0e, 0x52c: 0x38be, 0x52d: 0x3a4d, 0x52e: 0x3894, 0x52f: 0x3a23, + 0x530: 0x388d, 0x531: 0x3a1c, 0x532: 0x38a2, 0x533: 0x3a31, 0x534: 0x389b, 0x535: 0x3a2a, + 0x536: 0x38c5, 0x537: 0x3a54, 0x538: 0x46b5, 0x539: 0x4746, 0x53a: 0x3000, 0x53b: 0x330c, + 0x53c: 0x2fec, 0x53d: 0x32f8, 0x53e: 0x38da, 0x53f: 0x3a69, + // Block 0x15, offset 0x540 + 0x540: 0x38d3, 0x541: 0x3a62, 0x542: 0x38e8, 0x543: 0x3a77, 0x544: 0x38e1, 0x545: 0x3a70, + 0x546: 0x38fd, 0x547: 0x3a8c, 0x548: 0x3091, 0x549: 0x339d, 0x54a: 0x30a5, 0x54b: 0x33b1, + 0x54c: 0x46e7, 0x54d: 0x4778, 0x54e: 0x3136, 0x54f: 0x3447, 0x550: 0x3920, 0x551: 0x3aaf, + 0x552: 0x3919, 0x553: 0x3aa8, 0x554: 0x392e, 0x555: 0x3abd, 0x556: 0x3927, 0x557: 0x3ab6, + 0x558: 0x3989, 0x559: 0x3b18, 0x55a: 0x396d, 0x55b: 0x3afc, 0x55c: 0x3966, 0x55d: 0x3af5, + 0x55e: 0x397b, 0x55f: 0x3b0a, 0x560: 0x3974, 0x561: 0x3b03, 0x562: 0x3982, 0x563: 0x3b11, + 0x564: 0x31e5, 0x565: 0x34fb, 0x566: 0x31c7, 0x567: 0x34dd, 0x568: 0x39e4, 0x569: 0x3b73, + 0x56a: 0x39dd, 0x56b: 0x3b6c, 0x56c: 0x39f2, 0x56d: 0x3b81, 0x56e: 0x39eb, 0x56f: 0x3b7a, + 0x570: 0x39f9, 0x571: 0x3b88, 0x572: 0x3230, 0x573: 0x354b, 0x574: 0x3258, 0x575: 0x3578, + 0x576: 0x3253, 0x577: 0x356e, 0x578: 0x323f, 0x579: 0x355a, + // Block 0x16, offset 0x580 + 0x580: 0x4804, 0x581: 0x480a, 0x582: 0x491e, 0x583: 0x4936, 0x584: 0x4926, 0x585: 0x493e, + 0x586: 0x492e, 0x587: 0x4946, 0x588: 0x47aa, 0x589: 0x47b0, 0x58a: 0x488e, 0x58b: 0x48a6, + 0x58c: 0x4896, 0x58d: 0x48ae, 0x58e: 0x489e, 0x58f: 0x48b6, 0x590: 0x4816, 0x591: 0x481c, + 0x592: 0x3db8, 0x593: 0x3dc8, 0x594: 0x3dc0, 0x595: 0x3dd0, + 0x598: 0x47b6, 0x599: 0x47bc, 0x59a: 0x3ce8, 0x59b: 0x3cf8, 0x59c: 0x3cf0, 0x59d: 0x3d00, + 0x5a0: 0x482e, 0x5a1: 0x4834, 0x5a2: 0x494e, 0x5a3: 0x4966, + 0x5a4: 0x4956, 0x5a5: 0x496e, 0x5a6: 0x495e, 0x5a7: 0x4976, 0x5a8: 0x47c2, 0x5a9: 0x47c8, + 0x5aa: 0x48be, 0x5ab: 0x48d6, 0x5ac: 0x48c6, 0x5ad: 0x48de, 0x5ae: 0x48ce, 0x5af: 0x48e6, + 0x5b0: 0x4846, 0x5b1: 0x484c, 0x5b2: 0x3e18, 0x5b3: 0x3e30, 0x5b4: 0x3e20, 0x5b5: 0x3e38, + 0x5b6: 0x3e28, 0x5b7: 0x3e40, 0x5b8: 0x47ce, 0x5b9: 0x47d4, 0x5ba: 0x3d18, 0x5bb: 0x3d30, + 0x5bc: 0x3d20, 0x5bd: 0x3d38, 0x5be: 0x3d28, 0x5bf: 0x3d40, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4852, 0x5c1: 0x4858, 0x5c2: 0x3e48, 0x5c3: 0x3e58, 0x5c4: 0x3e50, 0x5c5: 0x3e60, + 0x5c8: 0x47da, 0x5c9: 0x47e0, 0x5ca: 0x3d48, 0x5cb: 0x3d58, + 0x5cc: 0x3d50, 0x5cd: 0x3d60, 0x5d0: 0x4864, 0x5d1: 0x486a, + 0x5d2: 0x3e80, 0x5d3: 0x3e98, 0x5d4: 0x3e88, 0x5d5: 0x3ea0, 0x5d6: 0x3e90, 0x5d7: 0x3ea8, + 0x5d9: 0x47e6, 0x5db: 0x3d68, 0x5dd: 
0x3d70, + 0x5df: 0x3d78, 0x5e0: 0x487c, 0x5e1: 0x4882, 0x5e2: 0x497e, 0x5e3: 0x4996, + 0x5e4: 0x4986, 0x5e5: 0x499e, 0x5e6: 0x498e, 0x5e7: 0x49a6, 0x5e8: 0x47ec, 0x5e9: 0x47f2, + 0x5ea: 0x48ee, 0x5eb: 0x4906, 0x5ec: 0x48f6, 0x5ed: 0x490e, 0x5ee: 0x48fe, 0x5ef: 0x4916, + 0x5f0: 0x47f8, 0x5f1: 0x431e, 0x5f2: 0x3691, 0x5f3: 0x4324, 0x5f4: 0x4822, 0x5f5: 0x432a, + 0x5f6: 0x36a3, 0x5f7: 0x4330, 0x5f8: 0x36c1, 0x5f9: 0x4336, 0x5fa: 0x36d9, 0x5fb: 0x433c, + 0x5fc: 0x4870, 0x5fd: 0x4342, + // Block 0x18, offset 0x600 + 0x600: 0x3da0, 0x601: 0x3da8, 0x602: 0x4184, 0x603: 0x41a2, 0x604: 0x418e, 0x605: 0x41ac, + 0x606: 0x4198, 0x607: 0x41b6, 0x608: 0x3cd8, 0x609: 0x3ce0, 0x60a: 0x40d0, 0x60b: 0x40ee, + 0x60c: 0x40da, 0x60d: 0x40f8, 0x60e: 0x40e4, 0x60f: 0x4102, 0x610: 0x3de8, 0x611: 0x3df0, + 0x612: 0x41c0, 0x613: 0x41de, 0x614: 0x41ca, 0x615: 0x41e8, 0x616: 0x41d4, 0x617: 0x41f2, + 0x618: 0x3d08, 0x619: 0x3d10, 0x61a: 0x410c, 0x61b: 0x412a, 0x61c: 0x4116, 0x61d: 0x4134, + 0x61e: 0x4120, 0x61f: 0x413e, 0x620: 0x3ec0, 0x621: 0x3ec8, 0x622: 0x41fc, 0x623: 0x421a, + 0x624: 0x4206, 0x625: 0x4224, 0x626: 0x4210, 0x627: 0x422e, 0x628: 0x3d80, 0x629: 0x3d88, + 0x62a: 0x4148, 0x62b: 0x4166, 0x62c: 0x4152, 0x62d: 0x4170, 0x62e: 0x415c, 0x62f: 0x417a, + 0x630: 0x3685, 0x631: 0x367f, 0x632: 0x3d90, 0x633: 0x368b, 0x634: 0x3d98, + 0x636: 0x4810, 0x637: 0x3db0, 0x638: 0x35f5, 0x639: 0x35ef, 0x63a: 0x35e3, 0x63b: 0x42ee, + 0x63c: 0x35fb, 0x63d: 0x8100, 0x63e: 0x01d3, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35a7, 0x642: 0x3dd8, 0x643: 0x369d, 0x644: 0x3de0, + 0x646: 0x483a, 0x647: 0x3df8, 0x648: 0x3601, 0x649: 0x42f4, 0x64a: 0x360d, 0x64b: 0x42fa, + 0x64c: 0x3619, 0x64d: 0x3b8f, 0x64e: 0x3b96, 0x64f: 0x3b9d, 0x650: 0x36b5, 0x651: 0x36af, + 0x652: 0x3e00, 0x653: 0x44e4, 0x656: 0x36bb, 0x657: 0x3e10, + 0x658: 0x3631, 0x659: 0x362b, 0x65a: 0x361f, 0x65b: 0x4300, 0x65d: 0x3ba4, + 0x65e: 0x3bab, 0x65f: 0x3bb2, 0x660: 0x36eb, 0x661: 0x36e5, 0x662: 0x3e68, 0x663: 0x44ec, + 0x664: 0x36cd, 0x665: 0x36d3, 0x666: 0x36f1, 0x667: 0x3e78, 0x668: 0x3661, 0x669: 0x365b, + 0x66a: 0x364f, 0x66b: 0x430c, 0x66c: 0x3649, 0x66d: 0x359b, 0x66e: 0x42e8, 0x66f: 0x0081, + 0x672: 0x3eb0, 0x673: 0x36f7, 0x674: 0x3eb8, + 0x676: 0x4888, 0x677: 0x3ed0, 0x678: 0x363d, 0x679: 0x4306, 0x67a: 0x366d, 0x67b: 0x4318, + 0x67c: 0x3679, 0x67d: 0x4256, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c06, 0x683: 0xa000, 0x684: 0x3c0d, 0x685: 0xa000, + 0x687: 0x3c14, 0x688: 0xa000, 0x689: 0x3c1b, + 0x68d: 0xa000, + 0x6a0: 0x2f65, 0x6a1: 0xa000, 0x6a2: 0x3c29, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c22, 0x6ae: 0x2f60, 0x6af: 0x2f6a, + 0x6b0: 0x3c30, 0x6b1: 0x3c37, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c3e, 0x6b5: 0x3c45, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c4c, 0x6b9: 0x3c53, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c5a, 0x6c1: 0x3c61, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c76, 0x6c5: 0x3c7d, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c84, 0x6c9: 0x3c8b, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3ca0, 0x6ed: 0x3ca7, 0x6ee: 0x3cae, 0x6ef: 0x3cb5, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f08, 0x70d: 0xa000, 0x70e: 0x3f10, 0x70f: 0xa000, 0x710: 0x3f18, 0x711: 0xa000, + 0x712: 0x3f20, 0x713: 0xa000, 0x714: 0x3f28, 0x715: 0xa000, 0x716: 0x3f30, 0x717: 0xa000, + 0x718: 0x3f38, 0x719: 
0xa000, 0x71a: 0x3f40, 0x71b: 0xa000, 0x71c: 0x3f48, 0x71d: 0xa000, + 0x71e: 0x3f50, 0x71f: 0xa000, 0x720: 0x3f58, 0x721: 0xa000, 0x722: 0x3f60, + 0x724: 0xa000, 0x725: 0x3f68, 0x726: 0xa000, 0x727: 0x3f70, 0x728: 0xa000, 0x729: 0x3f78, + 0x72f: 0xa000, + 0x730: 0x3f80, 0x731: 0x3f88, 0x732: 0xa000, 0x733: 0x3f90, 0x734: 0x3f98, 0x735: 0xa000, + 0x736: 0x3fa0, 0x737: 0x3fa8, 0x738: 0xa000, 0x739: 0x3fb0, 0x73a: 0x3fb8, 0x73b: 0xa000, + 0x73c: 0x3fc0, 0x73d: 0x3fc8, + // Block 0x1d, offset 0x740 + 0x754: 0x3f00, + 0x759: 0x9903, 0x75a: 0x9903, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fd0, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3fe0, 0x76d: 0xa000, 0x76e: 0x3fe8, 0x76f: 0xa000, + 0x770: 0x3ff0, 0x771: 0xa000, 0x772: 0x3ff8, 0x773: 0xa000, 0x774: 0x4000, 0x775: 0xa000, + 0x776: 0x4008, 0x777: 0xa000, 0x778: 0x4010, 0x779: 0xa000, 0x77a: 0x4018, 0x77b: 0xa000, + 0x77c: 0x4020, 0x77d: 0xa000, 0x77e: 0x4028, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4030, 0x781: 0xa000, 0x782: 0x4038, 0x784: 0xa000, 0x785: 0x4040, + 0x786: 0xa000, 0x787: 0x4048, 0x788: 0xa000, 0x789: 0x4050, + 0x78f: 0xa000, 0x790: 0x4058, 0x791: 0x4060, + 0x792: 0xa000, 0x793: 0x4068, 0x794: 0x4070, 0x795: 0xa000, 0x796: 0x4078, 0x797: 0x4080, + 0x798: 0xa000, 0x799: 0x4088, 0x79a: 0x4090, 0x79b: 0xa000, 0x79c: 0x4098, 0x79d: 0x40a0, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fd8, + 0x7b7: 0x40a8, 0x7b8: 0x40b0, 0x7b9: 0x40b8, 0x7ba: 0x40c0, + 0x7bd: 0xa000, 0x7be: 0x40c8, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1377, 0x7c1: 0x0cfb, 0x7c2: 0x13d3, 0x7c3: 0x139f, 0x7c4: 0x0e57, 0x7c5: 0x06eb, + 0x7c6: 0x08df, 0x7c7: 0x162b, 0x7c8: 0x162b, 0x7c9: 0x0a0b, 0x7ca: 0x145f, 0x7cb: 0x0943, + 0x7cc: 0x0a07, 0x7cd: 0x0bef, 0x7ce: 0x0fcf, 0x7cf: 0x115f, 0x7d0: 0x1297, 0x7d1: 0x12d3, + 0x7d2: 0x1307, 0x7d3: 0x141b, 0x7d4: 0x0d73, 0x7d5: 0x0dff, 0x7d6: 0x0eab, 0x7d7: 0x0f43, + 0x7d8: 0x125f, 0x7d9: 0x1447, 0x7da: 0x1573, 0x7db: 0x070f, 0x7dc: 0x08b3, 0x7dd: 0x0d87, + 0x7de: 0x0ecf, 0x7df: 0x1293, 0x7e0: 0x15c3, 0x7e1: 0x0ab3, 0x7e2: 0x0e77, 0x7e3: 0x1283, + 0x7e4: 0x1317, 0x7e5: 0x0c23, 0x7e6: 0x11bb, 0x7e7: 0x12df, 0x7e8: 0x0b1f, 0x7e9: 0x0d0f, + 0x7ea: 0x0e17, 0x7eb: 0x0f1b, 0x7ec: 0x1427, 0x7ed: 0x074f, 0x7ee: 0x07e7, 0x7ef: 0x0853, + 0x7f0: 0x0c8b, 0x7f1: 0x0d7f, 0x7f2: 0x0ecb, 0x7f3: 0x0fef, 0x7f4: 0x1177, 0x7f5: 0x128b, + 0x7f6: 0x12a3, 0x7f7: 0x13c7, 0x7f8: 0x14ef, 0x7f9: 0x15a3, 0x7fa: 0x15bf, 0x7fb: 0x102b, + 0x7fc: 0x106b, 0x7fd: 0x1123, 0x7fe: 0x1243, 0x7ff: 0x147b, + // Block 0x20, offset 0x800 + 0x800: 0x15cb, 0x801: 0x134b, 0x802: 0x09c7, 0x803: 0x0b3b, 0x804: 0x10db, 0x805: 0x119b, + 0x806: 0x0eff, 0x807: 0x1033, 0x808: 0x1397, 0x809: 0x14e7, 0x80a: 0x09c3, 0x80b: 0x0a8f, + 0x80c: 0x0d77, 0x80d: 0x0e2b, 0x80e: 0x0e5f, 0x80f: 0x1113, 0x810: 0x113b, 0x811: 0x14a7, + 0x812: 0x084f, 0x813: 0x11a7, 0x814: 0x07f3, 0x815: 0x07ef, 0x816: 0x1097, 0x817: 0x1127, + 0x818: 0x125b, 0x819: 0x14af, 0x81a: 0x1367, 0x81b: 0x0c27, 0x81c: 0x0d73, 0x81d: 0x1357, + 0x81e: 0x06f7, 0x81f: 0x0a63, 0x820: 0x0b93, 0x821: 0x0f2f, 0x822: 0x0faf, 0x823: 0x0873, + 0x824: 0x103b, 0x825: 0x075f, 0x826: 0x0b77, 0x827: 0x06d7, 0x828: 0x0deb, 0x829: 0x0ca3, + 0x82a: 0x110f, 0x82b: 0x08c7, 0x82c: 0x09b3, 0x82d: 0x0ffb, 0x82e: 0x1263, 0x82f: 0x133b, + 0x830: 0x0db7, 0x831: 0x13f7, 0x832: 0x0de3, 0x833: 0x0c37, 0x834: 0x121b, 0x835: 0x0c57, + 0x836: 0x0fab, 0x837: 0x072b, 0x838: 0x07a7, 0x839: 0x07eb, 0x83a: 0x0d53, 0x83b: 0x10fb, + 0x83c: 0x11f3, 0x83d: 0x1347, 0x83e: 0x145b, 0x83f: 0x085b, + // 
Block 0x21, offset 0x840 + 0x840: 0x090f, 0x841: 0x0a17, 0x842: 0x0b2f, 0x843: 0x0cbf, 0x844: 0x0e7b, 0x845: 0x103f, + 0x846: 0x1497, 0x847: 0x157b, 0x848: 0x15cf, 0x849: 0x15e7, 0x84a: 0x0837, 0x84b: 0x0cf3, + 0x84c: 0x0da3, 0x84d: 0x13eb, 0x84e: 0x0afb, 0x84f: 0x0bd7, 0x850: 0x0bf3, 0x851: 0x0c83, + 0x852: 0x0e6b, 0x853: 0x0eb7, 0x854: 0x0f67, 0x855: 0x108b, 0x856: 0x112f, 0x857: 0x1193, + 0x858: 0x13db, 0x859: 0x126b, 0x85a: 0x1403, 0x85b: 0x147f, 0x85c: 0x080f, 0x85d: 0x083b, + 0x85e: 0x0923, 0x85f: 0x0ea7, 0x860: 0x12f3, 0x861: 0x133b, 0x862: 0x0b1b, 0x863: 0x0b8b, + 0x864: 0x0c4f, 0x865: 0x0daf, 0x866: 0x10d7, 0x867: 0x0f23, 0x868: 0x073b, 0x869: 0x097f, + 0x86a: 0x0a63, 0x86b: 0x0ac7, 0x86c: 0x0b97, 0x86d: 0x0f3f, 0x86e: 0x0f5b, 0x86f: 0x116b, + 0x870: 0x118b, 0x871: 0x1463, 0x872: 0x14e3, 0x873: 0x14f3, 0x874: 0x152f, 0x875: 0x0753, + 0x876: 0x107f, 0x877: 0x144f, 0x878: 0x14cb, 0x879: 0x0baf, 0x87a: 0x0717, 0x87b: 0x0777, + 0x87c: 0x0a67, 0x87d: 0x0a87, 0x87e: 0x0caf, 0x87f: 0x0d73, + // Block 0x22, offset 0x880 + 0x880: 0x0ec3, 0x881: 0x0fcb, 0x882: 0x1277, 0x883: 0x1417, 0x884: 0x1623, 0x885: 0x0ce3, + 0x886: 0x14a3, 0x887: 0x0833, 0x888: 0x0d2f, 0x889: 0x0d3b, 0x88a: 0x0e0f, 0x88b: 0x0e47, + 0x88c: 0x0f4b, 0x88d: 0x0fa7, 0x88e: 0x1027, 0x88f: 0x110b, 0x890: 0x153b, 0x891: 0x07af, + 0x892: 0x0c03, 0x893: 0x14b3, 0x894: 0x0767, 0x895: 0x0aab, 0x896: 0x0e2f, 0x897: 0x13df, + 0x898: 0x0b67, 0x899: 0x0bb7, 0x89a: 0x0d43, 0x89b: 0x0f2f, 0x89c: 0x14bb, 0x89d: 0x0817, + 0x89e: 0x08ff, 0x89f: 0x0a97, 0x8a0: 0x0cd3, 0x8a1: 0x0d1f, 0x8a2: 0x0d5f, 0x8a3: 0x0df3, + 0x8a4: 0x0f47, 0x8a5: 0x0fbb, 0x8a6: 0x1157, 0x8a7: 0x12f7, 0x8a8: 0x1303, 0x8a9: 0x1457, + 0x8aa: 0x14d7, 0x8ab: 0x0883, 0x8ac: 0x0e4b, 0x8ad: 0x0903, 0x8ae: 0x0ec7, 0x8af: 0x0f6b, + 0x8b0: 0x1287, 0x8b1: 0x14bf, 0x8b2: 0x15ab, 0x8b3: 0x15d3, 0x8b4: 0x0d37, 0x8b5: 0x0e27, + 0x8b6: 0x11c3, 0x8b7: 0x10b7, 0x8b8: 0x10c3, 0x8b9: 0x10e7, 0x8ba: 0x0f17, 0x8bb: 0x0e9f, + 0x8bc: 0x1363, 0x8bd: 0x0733, 0x8be: 0x122b, 0x8bf: 0x081b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080b, 0x8c1: 0x0b0b, 0x8c2: 0x0c2b, 0x8c3: 0x10f3, 0x8c4: 0x0a53, 0x8c5: 0x0e03, + 0x8c6: 0x0cef, 0x8c7: 0x13e7, 0x8c8: 0x12e7, 0x8c9: 0x14ab, 0x8ca: 0x1323, 0x8cb: 0x0b27, + 0x8cc: 0x0787, 0x8cd: 0x095b, 0x8d0: 0x09af, + 0x8d2: 0x0cdf, 0x8d5: 0x07f7, 0x8d6: 0x0f1f, 0x8d7: 0x0fe3, + 0x8d8: 0x1047, 0x8d9: 0x1063, 0x8da: 0x1067, 0x8db: 0x107b, 0x8dc: 0x14fb, 0x8dd: 0x10eb, + 0x8de: 0x116f, 0x8e0: 0x128f, 0x8e2: 0x1353, + 0x8e5: 0x1407, 0x8e6: 0x1433, + 0x8ea: 0x154f, 0x8eb: 0x1553, 0x8ec: 0x1557, 0x8ed: 0x15bb, 0x8ee: 0x142b, 0x8ef: 0x14c7, + 0x8f0: 0x0757, 0x8f1: 0x077b, 0x8f2: 0x078f, 0x8f3: 0x084b, 0x8f4: 0x0857, 0x8f5: 0x0897, + 0x8f6: 0x094b, 0x8f7: 0x0967, 0x8f8: 0x096f, 0x8f9: 0x09ab, 0x8fa: 0x09b7, 0x8fb: 0x0a93, + 0x8fc: 0x0a9b, 0x8fd: 0x0ba3, 0x8fe: 0x0bcb, 0x8ff: 0x0bd3, + // Block 0x24, offset 0x900 + 0x900: 0x0beb, 0x901: 0x0c97, 0x902: 0x0cc7, 0x903: 0x0ce7, 0x904: 0x0d57, 0x905: 0x0e1b, + 0x906: 0x0e37, 0x907: 0x0e67, 0x908: 0x0ebb, 0x909: 0x0edb, 0x90a: 0x0f4f, 0x90b: 0x102f, + 0x90c: 0x104b, 0x90d: 0x1053, 0x90e: 0x104f, 0x90f: 0x1057, 0x910: 0x105b, 0x911: 0x105f, + 0x912: 0x1073, 0x913: 0x1077, 0x914: 0x109b, 0x915: 0x10af, 0x916: 0x10cb, 0x917: 0x112f, + 0x918: 0x1137, 0x919: 0x113f, 0x91a: 0x1153, 0x91b: 0x117b, 0x91c: 0x11cb, 0x91d: 0x11ff, + 0x91e: 0x11ff, 0x91f: 0x1267, 0x920: 0x130f, 0x921: 0x1327, 0x922: 0x135b, 0x923: 0x135f, + 0x924: 0x13a3, 0x925: 0x13a7, 0x926: 0x13ff, 0x927: 0x1407, 0x928: 0x14db, 0x929: 0x151f, + 0x92a: 0x1537, 0x92b: 
0x0b9b, 0x92c: 0x171e, 0x92d: 0x11e3, + 0x930: 0x06df, 0x931: 0x07e3, 0x932: 0x07a3, 0x933: 0x074b, 0x934: 0x078b, 0x935: 0x07b7, + 0x936: 0x0847, 0x937: 0x0863, 0x938: 0x094b, 0x939: 0x0937, 0x93a: 0x0947, 0x93b: 0x0963, + 0x93c: 0x09af, 0x93d: 0x09bf, 0x93e: 0x0a03, 0x93f: 0x0a0f, + // Block 0x25, offset 0x940 + 0x940: 0x0a2b, 0x941: 0x0a3b, 0x942: 0x0b23, 0x943: 0x0b2b, 0x944: 0x0b5b, 0x945: 0x0b7b, + 0x946: 0x0bab, 0x947: 0x0bc3, 0x948: 0x0bb3, 0x949: 0x0bd3, 0x94a: 0x0bc7, 0x94b: 0x0beb, + 0x94c: 0x0c07, 0x94d: 0x0c5f, 0x94e: 0x0c6b, 0x94f: 0x0c73, 0x950: 0x0c9b, 0x951: 0x0cdf, + 0x952: 0x0d0f, 0x953: 0x0d13, 0x954: 0x0d27, 0x955: 0x0da7, 0x956: 0x0db7, 0x957: 0x0e0f, + 0x958: 0x0e5b, 0x959: 0x0e53, 0x95a: 0x0e67, 0x95b: 0x0e83, 0x95c: 0x0ebb, 0x95d: 0x1013, + 0x95e: 0x0edf, 0x95f: 0x0f13, 0x960: 0x0f1f, 0x961: 0x0f5f, 0x962: 0x0f7b, 0x963: 0x0f9f, + 0x964: 0x0fc3, 0x965: 0x0fc7, 0x966: 0x0fe3, 0x967: 0x0fe7, 0x968: 0x0ff7, 0x969: 0x100b, + 0x96a: 0x1007, 0x96b: 0x1037, 0x96c: 0x10b3, 0x96d: 0x10cb, 0x96e: 0x10e3, 0x96f: 0x111b, + 0x970: 0x112f, 0x971: 0x114b, 0x972: 0x117b, 0x973: 0x122f, 0x974: 0x1257, 0x975: 0x12cb, + 0x976: 0x1313, 0x977: 0x131f, 0x978: 0x1327, 0x979: 0x133f, 0x97a: 0x1353, 0x97b: 0x1343, + 0x97c: 0x135b, 0x97d: 0x1357, 0x97e: 0x134f, 0x97f: 0x135f, + // Block 0x26, offset 0x980 + 0x980: 0x136b, 0x981: 0x13a7, 0x982: 0x13e3, 0x983: 0x1413, 0x984: 0x144b, 0x985: 0x146b, + 0x986: 0x14b7, 0x987: 0x14db, 0x988: 0x14fb, 0x989: 0x150f, 0x98a: 0x151f, 0x98b: 0x152b, + 0x98c: 0x1537, 0x98d: 0x158b, 0x98e: 0x162b, 0x98f: 0x16b5, 0x990: 0x16b0, 0x991: 0x16e2, + 0x992: 0x0607, 0x993: 0x062f, 0x994: 0x0633, 0x995: 0x1764, 0x996: 0x1791, 0x997: 0x1809, + 0x998: 0x1617, 0x999: 0x1627, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fb, 0x9c1: 0x06f3, 0x9c2: 0x0703, 0x9c3: 0x1647, 0x9c4: 0x0747, 0x9c5: 0x0757, + 0x9c6: 0x075b, 0x9c7: 0x0763, 0x9c8: 0x076b, 0x9c9: 0x076f, 0x9ca: 0x077b, 0x9cb: 0x0773, + 0x9cc: 0x05b3, 0x9cd: 0x165b, 0x9ce: 0x078f, 0x9cf: 0x0793, 0x9d0: 0x0797, 0x9d1: 0x07b3, + 0x9d2: 0x164c, 0x9d3: 0x05b7, 0x9d4: 0x079f, 0x9d5: 0x07bf, 0x9d6: 0x1656, 0x9d7: 0x07cf, + 0x9d8: 0x07d7, 0x9d9: 0x0737, 0x9da: 0x07df, 0x9db: 0x07e3, 0x9dc: 0x1831, 0x9dd: 0x07ff, + 0x9de: 0x0807, 0x9df: 0x05bf, 0x9e0: 0x081f, 0x9e1: 0x0823, 0x9e2: 0x082b, 0x9e3: 0x082f, + 0x9e4: 0x05c3, 0x9e5: 0x0847, 0x9e6: 0x084b, 0x9e7: 0x0857, 0x9e8: 0x0863, 0x9e9: 0x0867, + 0x9ea: 0x086b, 0x9eb: 0x0873, 0x9ec: 0x0893, 0x9ed: 0x0897, 0x9ee: 0x089f, 0x9ef: 0x08af, + 0x9f0: 0x08b7, 0x9f1: 0x08bb, 0x9f2: 0x08bb, 0x9f3: 0x08bb, 0x9f4: 0x166a, 0x9f5: 0x0e93, + 0x9f6: 0x08cf, 0x9f7: 0x08d7, 0x9f8: 0x166f, 0x9f9: 0x08e3, 0x9fa: 0x08eb, 0x9fb: 0x08f3, + 0x9fc: 0x091b, 0x9fd: 0x0907, 0x9fe: 0x0913, 0x9ff: 0x0917, + // Block 0x28, offset 0xa00 + 0xa00: 0x091f, 0xa01: 0x0927, 0xa02: 0x092b, 0xa03: 0x0933, 0xa04: 0x093b, 0xa05: 0x093f, + 0xa06: 0x093f, 0xa07: 0x0947, 0xa08: 0x094f, 0xa09: 0x0953, 0xa0a: 0x095f, 0xa0b: 0x0983, + 0xa0c: 0x0967, 0xa0d: 0x0987, 0xa0e: 0x096b, 0xa0f: 0x0973, 0xa10: 0x080b, 0xa11: 0x09cf, + 0xa12: 0x0997, 0xa13: 0x099b, 0xa14: 0x099f, 0xa15: 0x0993, 0xa16: 0x09a7, 0xa17: 0x09a3, + 0xa18: 0x09bb, 0xa19: 0x1674, 0xa1a: 0x09d7, 0xa1b: 0x09db, 0xa1c: 0x09e3, 0xa1d: 0x09ef, + 0xa1e: 0x09f7, 0xa1f: 0x0a13, 0xa20: 0x1679, 0xa21: 0x167e, 0xa22: 0x0a1f, 0xa23: 0x0a23, + 0xa24: 0x0a27, 0xa25: 0x0a1b, 0xa26: 0x0a2f, 0xa27: 0x05c7, 0xa28: 0x05cb, 0xa29: 0x0a37, + 0xa2a: 0x0a3f, 0xa2b: 0x0a3f, 0xa2c: 0x1683, 0xa2d: 0x0a5b, 0xa2e: 0x0a5f, 0xa2f: 0x0a63, + 0xa30: 0x0a6b, 0xa31: 0x1688, 0xa32: 0x0a73, 
0xa33: 0x0a77, 0xa34: 0x0b4f, 0xa35: 0x0a7f, + 0xa36: 0x05cf, 0xa37: 0x0a8b, 0xa38: 0x0a9b, 0xa39: 0x0aa7, 0xa3a: 0x0aa3, 0xa3b: 0x1692, + 0xa3c: 0x0aaf, 0xa3d: 0x1697, 0xa3e: 0x0abb, 0xa3f: 0x0ab7, + // Block 0x29, offset 0xa40 + 0xa40: 0x0abf, 0xa41: 0x0acf, 0xa42: 0x0ad3, 0xa43: 0x05d3, 0xa44: 0x0ae3, 0xa45: 0x0aeb, + 0xa46: 0x0aef, 0xa47: 0x0af3, 0xa48: 0x05d7, 0xa49: 0x169c, 0xa4a: 0x05db, 0xa4b: 0x0b0f, + 0xa4c: 0x0b13, 0xa4d: 0x0b17, 0xa4e: 0x0b1f, 0xa4f: 0x1863, 0xa50: 0x0b37, 0xa51: 0x16a6, + 0xa52: 0x16a6, 0xa53: 0x11d7, 0xa54: 0x0b47, 0xa55: 0x0b47, 0xa56: 0x05df, 0xa57: 0x16c9, + 0xa58: 0x179b, 0xa59: 0x0b57, 0xa5a: 0x0b5f, 0xa5b: 0x05e3, 0xa5c: 0x0b73, 0xa5d: 0x0b83, + 0xa5e: 0x0b87, 0xa5f: 0x0b8f, 0xa60: 0x0b9f, 0xa61: 0x05eb, 0xa62: 0x05e7, 0xa63: 0x0ba3, + 0xa64: 0x16ab, 0xa65: 0x0ba7, 0xa66: 0x0bbb, 0xa67: 0x0bbf, 0xa68: 0x0bc3, 0xa69: 0x0bbf, + 0xa6a: 0x0bcf, 0xa6b: 0x0bd3, 0xa6c: 0x0be3, 0xa6d: 0x0bdb, 0xa6e: 0x0bdf, 0xa6f: 0x0be7, + 0xa70: 0x0beb, 0xa71: 0x0bef, 0xa72: 0x0bfb, 0xa73: 0x0bff, 0xa74: 0x0c17, 0xa75: 0x0c1f, + 0xa76: 0x0c2f, 0xa77: 0x0c43, 0xa78: 0x16ba, 0xa79: 0x0c3f, 0xa7a: 0x0c33, 0xa7b: 0x0c4b, + 0xa7c: 0x0c53, 0xa7d: 0x0c67, 0xa7e: 0x16bf, 0xa7f: 0x0c6f, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c63, 0xa81: 0x0c5b, 0xa82: 0x05ef, 0xa83: 0x0c77, 0xa84: 0x0c7f, 0xa85: 0x0c87, + 0xa86: 0x0c7b, 0xa87: 0x05f3, 0xa88: 0x0c97, 0xa89: 0x0c9f, 0xa8a: 0x16c4, 0xa8b: 0x0ccb, + 0xa8c: 0x0cff, 0xa8d: 0x0cdb, 0xa8e: 0x05ff, 0xa8f: 0x0ce7, 0xa90: 0x05fb, 0xa91: 0x05f7, + 0xa92: 0x07c3, 0xa93: 0x07c7, 0xa94: 0x0d03, 0xa95: 0x0ceb, 0xa96: 0x11ab, 0xa97: 0x0663, + 0xa98: 0x0d0f, 0xa99: 0x0d13, 0xa9a: 0x0d17, 0xa9b: 0x0d2b, 0xa9c: 0x0d23, 0xa9d: 0x16dd, + 0xa9e: 0x0603, 0xa9f: 0x0d3f, 0xaa0: 0x0d33, 0xaa1: 0x0d4f, 0xaa2: 0x0d57, 0xaa3: 0x16e7, + 0xaa4: 0x0d5b, 0xaa5: 0x0d47, 0xaa6: 0x0d63, 0xaa7: 0x0607, 0xaa8: 0x0d67, 0xaa9: 0x0d6b, + 0xaaa: 0x0d6f, 0xaab: 0x0d7b, 0xaac: 0x16ec, 0xaad: 0x0d83, 0xaae: 0x060b, 0xaaf: 0x0d8f, + 0xab0: 0x16f1, 0xab1: 0x0d93, 0xab2: 0x060f, 0xab3: 0x0d9f, 0xab4: 0x0dab, 0xab5: 0x0db7, + 0xab6: 0x0dbb, 0xab7: 0x16f6, 0xab8: 0x168d, 0xab9: 0x16fb, 0xaba: 0x0ddb, 0xabb: 0x1700, + 0xabc: 0x0de7, 0xabd: 0x0def, 0xabe: 0x0ddf, 0xabf: 0x0dfb, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0b, 0xac1: 0x0e1b, 0xac2: 0x0e0f, 0xac3: 0x0e13, 0xac4: 0x0e1f, 0xac5: 0x0e23, + 0xac6: 0x1705, 0xac7: 0x0e07, 0xac8: 0x0e3b, 0xac9: 0x0e3f, 0xaca: 0x0613, 0xacb: 0x0e53, + 0xacc: 0x0e4f, 0xacd: 0x170a, 0xace: 0x0e33, 0xacf: 0x0e6f, 0xad0: 0x170f, 0xad1: 0x1714, + 0xad2: 0x0e73, 0xad3: 0x0e87, 0xad4: 0x0e83, 0xad5: 0x0e7f, 0xad6: 0x0617, 0xad7: 0x0e8b, + 0xad8: 0x0e9b, 0xad9: 0x0e97, 0xada: 0x0ea3, 0xadb: 0x1651, 0xadc: 0x0eb3, 0xadd: 0x1719, + 0xade: 0x0ebf, 0xadf: 0x1723, 0xae0: 0x0ed3, 0xae1: 0x0edf, 0xae2: 0x0ef3, 0xae3: 0x1728, + 0xae4: 0x0f07, 0xae5: 0x0f0b, 0xae6: 0x172d, 0xae7: 0x1732, 0xae8: 0x0f27, 0xae9: 0x0f37, + 0xaea: 0x061b, 0xaeb: 0x0f3b, 0xaec: 0x061f, 0xaed: 0x061f, 0xaee: 0x0f53, 0xaef: 0x0f57, + 0xaf0: 0x0f5f, 0xaf1: 0x0f63, 0xaf2: 0x0f6f, 0xaf3: 0x0623, 0xaf4: 0x0f87, 0xaf5: 0x1737, + 0xaf6: 0x0fa3, 0xaf7: 0x173c, 0xaf8: 0x0faf, 0xaf9: 0x16a1, 0xafa: 0x0fbf, 0xafb: 0x1741, + 0xafc: 0x1746, 0xafd: 0x174b, 0xafe: 0x0627, 0xaff: 0x062b, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ff7, 0xb01: 0x1755, 0xb02: 0x1750, 0xb03: 0x175a, 0xb04: 0x175f, 0xb05: 0x0fff, + 0xb06: 0x1003, 0xb07: 0x1003, 0xb08: 0x100b, 0xb09: 0x0633, 0xb0a: 0x100f, 0xb0b: 0x0637, + 0xb0c: 0x063b, 0xb0d: 0x1769, 0xb0e: 0x1023, 0xb0f: 0x102b, 0xb10: 0x1037, 0xb11: 0x063f, + 0xb12: 
0x176e, 0xb13: 0x105b, 0xb14: 0x1773, 0xb15: 0x1778, 0xb16: 0x107b, 0xb17: 0x1093, + 0xb18: 0x0643, 0xb19: 0x109b, 0xb1a: 0x109f, 0xb1b: 0x10a3, 0xb1c: 0x177d, 0xb1d: 0x1782, + 0xb1e: 0x1782, 0xb1f: 0x10bb, 0xb20: 0x0647, 0xb21: 0x1787, 0xb22: 0x10cf, 0xb23: 0x10d3, + 0xb24: 0x064b, 0xb25: 0x178c, 0xb26: 0x10ef, 0xb27: 0x064f, 0xb28: 0x10ff, 0xb29: 0x10f7, + 0xb2a: 0x1107, 0xb2b: 0x1796, 0xb2c: 0x111f, 0xb2d: 0x0653, 0xb2e: 0x112b, 0xb2f: 0x1133, + 0xb30: 0x1143, 0xb31: 0x0657, 0xb32: 0x17a0, 0xb33: 0x17a5, 0xb34: 0x065b, 0xb35: 0x17aa, + 0xb36: 0x115b, 0xb37: 0x17af, 0xb38: 0x1167, 0xb39: 0x1173, 0xb3a: 0x117b, 0xb3b: 0x17b4, + 0xb3c: 0x17b9, 0xb3d: 0x118f, 0xb3e: 0x17be, 0xb3f: 0x1197, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16ce, 0xb41: 0x065f, 0xb42: 0x11af, 0xb43: 0x11b3, 0xb44: 0x0667, 0xb45: 0x11b7, + 0xb46: 0x0a33, 0xb47: 0x17c3, 0xb48: 0x17c8, 0xb49: 0x16d3, 0xb4a: 0x16d8, 0xb4b: 0x11d7, + 0xb4c: 0x11db, 0xb4d: 0x13f3, 0xb4e: 0x066b, 0xb4f: 0x1207, 0xb50: 0x1203, 0xb51: 0x120b, + 0xb52: 0x083f, 0xb53: 0x120f, 0xb54: 0x1213, 0xb55: 0x1217, 0xb56: 0x121f, 0xb57: 0x17cd, + 0xb58: 0x121b, 0xb59: 0x1223, 0xb5a: 0x1237, 0xb5b: 0x123b, 0xb5c: 0x1227, 0xb5d: 0x123f, + 0xb5e: 0x1253, 0xb5f: 0x1267, 0xb60: 0x1233, 0xb61: 0x1247, 0xb62: 0x124b, 0xb63: 0x124f, + 0xb64: 0x17d2, 0xb65: 0x17dc, 0xb66: 0x17d7, 0xb67: 0x066f, 0xb68: 0x126f, 0xb69: 0x1273, + 0xb6a: 0x127b, 0xb6b: 0x17f0, 0xb6c: 0x127f, 0xb6d: 0x17e1, 0xb6e: 0x0673, 0xb6f: 0x0677, + 0xb70: 0x17e6, 0xb71: 0x17eb, 0xb72: 0x067b, 0xb73: 0x129f, 0xb74: 0x12a3, 0xb75: 0x12a7, + 0xb76: 0x12ab, 0xb77: 0x12b7, 0xb78: 0x12b3, 0xb79: 0x12bf, 0xb7a: 0x12bb, 0xb7b: 0x12cb, + 0xb7c: 0x12c3, 0xb7d: 0x12c7, 0xb7e: 0x12cf, 0xb7f: 0x067f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12d7, 0xb81: 0x12db, 0xb82: 0x0683, 0xb83: 0x12eb, 0xb84: 0x12ef, 0xb85: 0x17f5, + 0xb86: 0x12fb, 0xb87: 0x12ff, 0xb88: 0x0687, 0xb89: 0x130b, 0xb8a: 0x05bb, 0xb8b: 0x17fa, + 0xb8c: 0x17ff, 0xb8d: 0x068b, 0xb8e: 0x068f, 0xb8f: 0x1337, 0xb90: 0x134f, 0xb91: 0x136b, + 0xb92: 0x137b, 0xb93: 0x1804, 0xb94: 0x138f, 0xb95: 0x1393, 0xb96: 0x13ab, 0xb97: 0x13b7, + 0xb98: 0x180e, 0xb99: 0x1660, 0xb9a: 0x13c3, 0xb9b: 0x13bf, 0xb9c: 0x13cb, 0xb9d: 0x1665, + 0xb9e: 0x13d7, 0xb9f: 0x13e3, 0xba0: 0x1813, 0xba1: 0x1818, 0xba2: 0x1423, 0xba3: 0x142f, + 0xba4: 0x1437, 0xba5: 0x181d, 0xba6: 0x143b, 0xba7: 0x1467, 0xba8: 0x1473, 0xba9: 0x1477, + 0xbaa: 0x146f, 0xbab: 0x1483, 0xbac: 0x1487, 0xbad: 0x1822, 0xbae: 0x1493, 0xbaf: 0x0693, + 0xbb0: 0x149b, 0xbb1: 0x1827, 0xbb2: 0x0697, 0xbb3: 0x14d3, 0xbb4: 0x0ac3, 0xbb5: 0x14eb, + 0xbb6: 0x182c, 0xbb7: 0x1836, 0xbb8: 0x069b, 0xbb9: 0x069f, 0xbba: 0x1513, 0xbbb: 0x183b, + 0xbbc: 0x06a3, 0xbbd: 0x1840, 0xbbe: 0x152b, 0xbbf: 0x152b, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1533, 0xbc1: 0x1845, 0xbc2: 0x154b, 0xbc3: 0x06a7, 0xbc4: 0x155b, 0xbc5: 0x1567, + 0xbc6: 0x156f, 0xbc7: 0x1577, 0xbc8: 0x06ab, 0xbc9: 0x184a, 0xbca: 0x158b, 0xbcb: 0x15a7, + 0xbcc: 0x15b3, 0xbcd: 0x06af, 0xbce: 0x06b3, 0xbcf: 0x15b7, 0xbd0: 0x184f, 0xbd1: 0x06b7, + 0xbd2: 0x1854, 0xbd3: 0x1859, 0xbd4: 0x185e, 0xbd5: 0x15db, 0xbd6: 0x06bb, 0xbd7: 0x15ef, + 0xbd8: 0x15f7, 0xbd9: 0x15fb, 0xbda: 0x1603, 0xbdb: 0x160b, 0xbdc: 0x1613, 0xbdd: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0e, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66, + 0x170: 0x67, 0x173: 0x68, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 
0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + 0x374: 0xa1, + 0x37d: 0xa2, + // Block 0xe, offset 0x380 + 0x381: 0xa3, 0x382: 0xa4, 0x384: 0xa5, 0x385: 0x82, 0x387: 0xa6, + 0x388: 0xa7, 0x38b: 0xa8, 0x38c: 0xa9, 0x38d: 0xaa, + 0x391: 0xab, 0x392: 0xac, 0x393: 0xad, 0x396: 0xae, 0x397: 0xaf, + 0x398: 0x73, 0x39a: 0xb0, 0x39c: 0xb1, + 0x3a0: 0xb2, + 0x3a8: 0xb3, 0x3a9: 0xb4, 0x3aa: 0xb5, + 0x3b0: 0x73, 0x3b5: 0xb6, 0x3b6: 0xb7, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xb8, 0x3ec: 0xb9, + // Block 0x10, offset 0x400 + 0x432: 0xba, + // Block 0x11, offset 0x440 + 0x445: 0xbb, 0x446: 0xbc, 0x447: 0xbd, + 0x449: 0xbe, + // Block 0x12, offset 0x480 + 0x480: 0xbf, + 0x4a3: 0xc0, 0x4a5: 0xc1, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc2, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 149 entries, 298 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xcf, 0xd1, 0xd6, 0xe7, 0xf3, 0xf5, 0xfb, 0xfd, 0xff, 0x101, 0x103, 0x105, 0x107, 0x10a, 0x10d, 0x10f, 0x112, 0x115, 0x119, 0x11e, 0x127, 0x129, 0x12c, 0x12e, 0x139, 0x13d, 0x14b, 0x14e, 0x154, 0x15a, 0x165, 0x169, 0x16b, 0x16d, 0x16f, 0x171, 0x173, 0x179, 0x17d, 0x17f, 0x181, 0x189, 0x18d, 0x190, 0x192, 0x194, 0x196, 0x199, 0x19b, 0x19d, 0x19f, 0x1a1, 0x1a7, 0x1aa, 0x1ac, 0x1b3, 0x1b9, 0x1bf, 0x1c7, 0x1cd, 0x1d3, 0x1d9, 0x1dd, 0x1eb, 0x1f4, 0x1f7, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x205, 0x20a, 0x20c, 0x20e, 0x213, 0x219, 0x21b, 0x21d, 0x21f, 0x225, 0x228, 0x22a, 0x230, 0x233, 0x23b, 0x242, 0x245, 0x248, 0x24a, 0x24d, 0x255, 0x259, 0x260, 0x263, 0x269, 0x26b, 0x26e, 0x270, 0x273, 0x275, 0x277, 0x279, 0x27c, 0x27e, 0x280, 0x282, 0x284, 0x291, 0x29b, 0x29d, 0x29f, 0x2a5, 0x2a7, 0x2aa} + +// nfcSparseValues: 684 entries, 2736 bytes +var nfcSparseValues = [684]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + 
{value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4840, lo: 0x8a, hi: 0x8a}, + {value: 0x485e, lo: 0x8b, hi: 0x8b}, + {value: 0x36c7, lo: 0x8c, hi: 0x8c}, + {value: 0x36df, lo: 0x8d, hi: 0x8d}, + {value: 0x4876, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x36fd, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + 
{value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, 
+ {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x23, offset 0xcf + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd1 + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd6 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe7 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf3 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf5 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfb + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0xfd + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0xff + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 
0xb5}, + // Block 0x2c, offset 0x101 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x103 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x105 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x107 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10a + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x10f + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x112 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x115 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x119 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x11e + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x127 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x129 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x12c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x12e + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x139 + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3c, offset 0x13d + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3d, offset 0x14b + {value: 0x427b, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3e, offset 0x14e + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + 
{value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x3f, offset 0x154 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x40, offset 0x15a + {value: 0x6408, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x41, offset 0x165 + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x42, offset 0x169 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x43, offset 0x16b + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x44, offset 0x16d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x45, offset 0x16f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x46, offset 0x171 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x47, offset 0x173 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x48, offset 0x179 + {value: 0x0000, lo: 0x03}, + {value: 0x4a9f, lo: 0xb3, hi: 0xb3}, + {value: 0x4a9f, lo: 0xb5, hi: 0xb6}, + {value: 0x4a9f, lo: 0xba, hi: 0xbf}, + // Block 0x49, offset 0x17d + {value: 0x0000, lo: 0x01}, + {value: 0x4a9f, lo: 0x8f, hi: 0xa3}, + // Block 0x4a, offset 0x17f + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4b, offset 0x181 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4c, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4d, offset 0x18d + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x4e, offset 0x190 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x4f, offset 0x192 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x50, offset 0x194 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x51, offset 0x196 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x52, offset 0x199 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x53, offset 0x19b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x54, offset 0x19d + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x55, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 
0x56, offset 0x1a1 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x57, offset 0x1a7 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x58, offset 0x1aa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x59, offset 0x1ac + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5a, offset 0x1b3 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5b, offset 0x1b9 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5c, offset 0x1bf + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5d, offset 0x1c7 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5e, offset 0x1cd + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x5f, offset 0x1d3 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x60, offset 0x1d9 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x61, offset 0x1dd + {value: 0x0006, lo: 0x0d}, + {value: 0x4390, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4402, lo: 0x9f, hi: 0x9f}, + {value: 0x43f0, lo: 0xaa, hi: 0xab}, + {value: 0x44f4, lo: 0xac, hi: 0xac}, + {value: 0x44fc, lo: 0xad, hi: 0xad}, + {value: 0x4348, lo: 0xae, hi: 0xb1}, + {value: 0x4366, lo: 0xb2, hi: 0xb4}, + {value: 0x437e, lo: 0xb5, hi: 0xb6}, + {value: 0x438a, lo: 0xb8, hi: 0xb8}, + {value: 0x4396, lo: 0xb9, hi: 0xbb}, + {value: 0x43ae, lo: 0xbc, hi: 0xbc}, + {value: 0x43b4, lo: 0xbe, hi: 0xbe}, + // Block 0x62, offset 0x1eb + {value: 0x0006, lo: 0x08}, + {value: 0x43ba, lo: 0x80, hi: 0x81}, + {value: 0x43c6, lo: 0x83, hi: 0x84}, + {value: 0x43d8, lo: 0x86, hi: 0x89}, + {value: 0x43fc, lo: 0x8a, hi: 0x8a}, + {value: 0x4378, lo: 0x8b, hi: 0x8b}, + {value: 0x4360, lo: 0x8c, hi: 0x8c}, + {value: 0x43a8, lo: 0x8d, hi: 0x8d}, + {value: 0x43d2, lo: 0x8e, hi: 0x8e}, + // Block 0x63, offset 0x1f4 + {value: 0x0000, lo: 0x02}, + {value: 
0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x64, offset 0x1f7 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x65, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x66, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x67, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x68, offset 0x201 + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x69, offset 0x205 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6a, offset 0x20a + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6b, offset 0x20c + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6c, offset 0x20e + {value: 0x0000, lo: 0x04}, + {value: 0x4a9f, lo: 0x9e, hi: 0x9f}, + {value: 0x4a9f, lo: 0xa3, hi: 0xa3}, + {value: 0x4a9f, lo: 0xa5, hi: 0xa6}, + {value: 0x4a9f, lo: 0xaa, hi: 0xaf}, + // Block 0x6d, offset 0x213 + {value: 0x0000, lo: 0x05}, + {value: 0x4a9f, lo: 0x82, hi: 0x87}, + {value: 0x4a9f, lo: 0x8a, hi: 0x8f}, + {value: 0x4a9f, lo: 0x92, hi: 0x97}, + {value: 0x4a9f, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6e, offset 0x219 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6f, offset 0x21b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x70, offset 0x21d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x71, offset 0x21f + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x225 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x73, offset 0x228 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x74, offset 0x22a + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x230 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x233 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x77, offset 0x23b + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x78, offset 0x242 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x79, offset 
0x245 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x7a, offset 0x248 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7b, offset 0x24a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7c, offset 0x24d + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7d, offset 0x255 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7e, offset 0x259 + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7f, offset 0x260 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x80, offset 0x263 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x269 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x82, offset 0x26b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x83, offset 0x26e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x84, offset 0x270 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x85, offset 0x273 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x86, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x87, offset 0x277 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x88, offset 0x279 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x89, offset 0x27c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x8a, offset 0x27e + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8b, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x8c, offset 0x282 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8d, offset 0x284 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8e, offset 0x291 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, 
lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x8f, offset 0x29b + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x90, offset 0x29d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x91, offset 0x29f + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x92, offset 0x2a5 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x93, offset 0x2a7 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x94, offset 0x2aa + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. 
+func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 17248 bytes (16.84 KiB). Checksum: 4fb368372b6b1b27. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 92: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 92 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 94 blocks, 6016 entries, 12032 bytes +// The third block is the zero block. 
+var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x132: 0x195d, 0x133: 0x19e7, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, 0x13f: 0x1bac, + // Block 0x5, offset 0x140 + 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2dee, 0x185: 0x2df4, + 0x186: 0x2dfa, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a08, 0x18a: 0x1987, 0x18b: 0x198a, + 0x18c: 0x1a3e, 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a5, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425a, 0x285: 0x447b, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 
0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e, + 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3721, 0x301: 0x372d, 0x303: 0x371b, + 0x306: 0xa000, 0x307: 0x3709, + 0x30c: 0x375d, 0x30d: 0x3745, 0x30e: 0x376f, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3751, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d5, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3733, 0x342: 0x37b7, + 0x350: 0x370f, 0x351: 0x3793, + 0x352: 0x3715, 0x353: 0x3799, 0x356: 0x3727, 0x357: 0x37ab, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3829, 0x35b: 0x382f, 0x35c: 0x3739, 0x35d: 0x37bd, + 0x35e: 0x373f, 0x35f: 0x37c3, 0x362: 0x374b, 0x363: 0x37cf, + 0x364: 0x3757, 0x365: 0x37db, 0x366: 0x3763, 0x367: 0x37e7, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3835, 0x36b: 0x383b, 0x36c: 0x378d, 0x36d: 0x3811, 0x36e: 0x3769, 0x36f: 0x37ed, + 0x370: 0x3775, 0x371: 0x37f9, 0x372: 0x377b, 0x373: 0x37ff, 0x374: 0x3781, 0x375: 0x3805, + 0x378: 0x3787, 0x379: 0x380b, + // Block 0xe, offset 0x380 + 0x387: 0x1d61, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d84, + 0x3f6: 0x2013, 0x3f7: 0x204f, 0x3f8: 0x204a, + // Block 0x10, offset 0x400 + 0x413: 0x812d, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132, + 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132, + 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x423: 0x812d, + 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x812d, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x812d, + 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x812d, 0x42e: 0x812d, 0x42f: 0x812d, + 0x430: 0x8116, 0x431: 0x8117, 0x432: 0x8118, 0x433: 
0x8132, 0x434: 0x8132, 0x435: 0x8132, + 0x436: 0x812d, 0x437: 0x8132, 0x438: 0x8132, 0x439: 0x812d, 0x43a: 0x812d, 0x43b: 0x8132, + 0x43c: 0x8132, 0x43d: 0x8132, 0x43e: 0x8132, 0x43f: 0x8132, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d26, 0x447: 0xa000, 0x448: 0x2d2e, 0x449: 0xa000, 0x44a: 0x2d36, 0x44b: 0xa000, + 0x44c: 0x2d3e, 0x44d: 0xa000, 0x44e: 0x2d46, 0x451: 0xa000, + 0x452: 0x2d4e, + 0x474: 0x8102, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d56, + 0x47c: 0xa000, 0x47d: 0x2d5e, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0413, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x0417, 0x495: 0x041b, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0423, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x0427, 0x49d: 0x01be, + 0x49e: 0x01c1, 0x49f: 0x01c4, 0x4a0: 0x01fa, 0x4a1: 0x01fd, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01be, 0x4a7: 0x01c1, 0x4a8: 0x01eb, 0x4a9: 0x01fa, + 0x4aa: 0x01fd, + 0x4b8: 0x020c, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042b, 0x4e8: 0x016a, 0x4e9: 0x0128, + 0x4ea: 0x042f, 0x4eb: 0x016d, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x041f, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x015e, 0x4fd: 0x0161, 0x4fe: 0x0164, 0x4ff: 0x01d0, + // Block 0x14, offset 0x500 + 0x500: 0x8132, 0x501: 0x8132, 0x502: 0x812d, 0x503: 0x8132, 0x504: 0x8132, 0x505: 0x8132, + 0x506: 0x8132, 0x507: 0x8132, 0x508: 0x8132, 0x509: 0x8132, 0x50a: 0x812d, 0x50b: 0x8132, + 0x50c: 0x8132, 0x50d: 0x8135, 0x50e: 0x812a, 0x50f: 0x812d, 0x510: 0x8129, 0x511: 0x8132, + 0x512: 0x8132, 0x513: 0x8132, 0x514: 0x8132, 0x515: 0x8132, 0x516: 0x8132, 0x517: 0x8132, + 0x518: 0x8132, 0x519: 0x8132, 0x51a: 0x8132, 0x51b: 0x8132, 0x51c: 0x8132, 0x51d: 0x8132, + 0x51e: 0x8132, 0x51f: 0x8132, 0x520: 0x8132, 0x521: 0x8132, 0x522: 0x8132, 0x523: 0x8132, + 0x524: 0x8132, 0x525: 0x8132, 0x526: 0x8132, 0x527: 0x8132, 0x528: 0x8132, 0x529: 0x8132, + 0x52a: 0x8132, 0x52b: 0x8132, 0x52c: 0x8132, 0x52d: 0x8132, 0x52e: 0x8132, 0x52f: 0x8132, + 0x530: 0x8132, 0x531: 0x8132, 0x532: 0x8132, 0x533: 0x8132, 0x534: 0x8132, 0x535: 0x8132, + 0x536: 0x8133, 0x537: 0x8131, 0x538: 0x8131, 0x539: 0x812d, 0x53b: 0x8132, + 0x53c: 0x8134, 0x53d: 0x812d, 0x53e: 0x8132, 0x53f: 0x812d, + // Block 0x15, offset 0x540 + 0x540: 0x2f97, 0x541: 0x32a3, 0x542: 0x2fa1, 0x543: 0x32ad, 0x544: 0x2fa6, 0x545: 0x32b2, + 0x546: 0x2fab, 0x547: 0x32b7, 0x548: 0x38cc, 0x549: 0x3a5b, 0x54a: 0x2fc4, 0x54b: 0x32d0, + 0x54c: 0x2fce, 0x54d: 0x32da, 0x54e: 0x2fdd, 0x54f: 0x32e9, 0x550: 0x2fd3, 0x551: 0x32df, + 0x552: 0x2fd8, 0x553: 0x32e4, 0x554: 0x38ef, 0x555: 0x3a7e, 0x556: 0x38f6, 0x557: 0x3a85, + 0x558: 0x3019, 0x559: 0x3325, 0x55a: 0x301e, 0x55b: 0x332a, 0x55c: 0x3904, 0x55d: 0x3a93, + 0x55e: 0x3023, 0x55f: 0x332f, 0x560: 0x3032, 0x561: 0x333e, 0x562: 0x3050, 0x563: 0x335c, + 0x564: 0x305f, 0x565: 0x336b, 0x566: 0x3055, 0x567: 0x3361, 0x568: 0x3064, 0x569: 0x3370, + 0x56a: 0x3069, 0x56b: 0x3375, 0x56c: 0x30af, 0x56d: 
0x33bb, 0x56e: 0x390b, 0x56f: 0x3a9a, + 0x570: 0x30b9, 0x571: 0x33ca, 0x572: 0x30c3, 0x573: 0x33d4, 0x574: 0x30cd, 0x575: 0x33de, + 0x576: 0x46c4, 0x577: 0x4755, 0x578: 0x3912, 0x579: 0x3aa1, 0x57a: 0x30e6, 0x57b: 0x33f7, + 0x57c: 0x30e1, 0x57d: 0x33f2, 0x57e: 0x30eb, 0x57f: 0x33fc, + // Block 0x16, offset 0x580 + 0x580: 0x30f0, 0x581: 0x3401, 0x582: 0x30f5, 0x583: 0x3406, 0x584: 0x3109, 0x585: 0x341a, + 0x586: 0x3113, 0x587: 0x3424, 0x588: 0x3122, 0x589: 0x3433, 0x58a: 0x311d, 0x58b: 0x342e, + 0x58c: 0x3935, 0x58d: 0x3ac4, 0x58e: 0x3943, 0x58f: 0x3ad2, 0x590: 0x394a, 0x591: 0x3ad9, + 0x592: 0x3951, 0x593: 0x3ae0, 0x594: 0x314f, 0x595: 0x3460, 0x596: 0x3154, 0x597: 0x3465, + 0x598: 0x315e, 0x599: 0x346f, 0x59a: 0x46f1, 0x59b: 0x4782, 0x59c: 0x3997, 0x59d: 0x3b26, + 0x59e: 0x3177, 0x59f: 0x3488, 0x5a0: 0x3181, 0x5a1: 0x3492, 0x5a2: 0x4700, 0x5a3: 0x4791, + 0x5a4: 0x399e, 0x5a5: 0x3b2d, 0x5a6: 0x39a5, 0x5a7: 0x3b34, 0x5a8: 0x39ac, 0x5a9: 0x3b3b, + 0x5aa: 0x3190, 0x5ab: 0x34a1, 0x5ac: 0x319a, 0x5ad: 0x34b0, 0x5ae: 0x31ae, 0x5af: 0x34c4, + 0x5b0: 0x31a9, 0x5b1: 0x34bf, 0x5b2: 0x31ea, 0x5b3: 0x3500, 0x5b4: 0x31f9, 0x5b5: 0x350f, + 0x5b6: 0x31f4, 0x5b7: 0x350a, 0x5b8: 0x39b3, 0x5b9: 0x3b42, 0x5ba: 0x39ba, 0x5bb: 0x3b49, + 0x5bc: 0x31fe, 0x5bd: 0x3514, 0x5be: 0x3203, 0x5bf: 0x3519, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3208, 0x5c1: 0x351e, 0x5c2: 0x320d, 0x5c3: 0x3523, 0x5c4: 0x321c, 0x5c5: 0x3532, + 0x5c6: 0x3217, 0x5c7: 0x352d, 0x5c8: 0x3221, 0x5c9: 0x353c, 0x5ca: 0x3226, 0x5cb: 0x3541, + 0x5cc: 0x322b, 0x5cd: 0x3546, 0x5ce: 0x3249, 0x5cf: 0x3564, 0x5d0: 0x3262, 0x5d1: 0x3582, + 0x5d2: 0x3271, 0x5d3: 0x3591, 0x5d4: 0x3276, 0x5d5: 0x3596, 0x5d6: 0x337a, 0x5d7: 0x34a6, + 0x5d8: 0x3537, 0x5d9: 0x3573, 0x5da: 0x1be0, 0x5db: 0x42d7, + 0x5e0: 0x46a1, 0x5e1: 0x4732, 0x5e2: 0x2f83, 0x5e3: 0x328f, + 0x5e4: 0x3878, 0x5e5: 0x3a07, 0x5e6: 0x3871, 0x5e7: 0x3a00, 0x5e8: 0x3886, 0x5e9: 0x3a15, + 0x5ea: 0x387f, 0x5eb: 0x3a0e, 0x5ec: 0x38be, 0x5ed: 0x3a4d, 0x5ee: 0x3894, 0x5ef: 0x3a23, + 0x5f0: 0x388d, 0x5f1: 0x3a1c, 0x5f2: 0x38a2, 0x5f3: 0x3a31, 0x5f4: 0x389b, 0x5f5: 0x3a2a, + 0x5f6: 0x38c5, 0x5f7: 0x3a54, 0x5f8: 0x46b5, 0x5f9: 0x4746, 0x5fa: 0x3000, 0x5fb: 0x330c, + 0x5fc: 0x2fec, 0x5fd: 0x32f8, 0x5fe: 0x38da, 0x5ff: 0x3a69, + // Block 0x18, offset 0x600 + 0x600: 0x38d3, 0x601: 0x3a62, 0x602: 0x38e8, 0x603: 0x3a77, 0x604: 0x38e1, 0x605: 0x3a70, + 0x606: 0x38fd, 0x607: 0x3a8c, 0x608: 0x3091, 0x609: 0x339d, 0x60a: 0x30a5, 0x60b: 0x33b1, + 0x60c: 0x46e7, 0x60d: 0x4778, 0x60e: 0x3136, 0x60f: 0x3447, 0x610: 0x3920, 0x611: 0x3aaf, + 0x612: 0x3919, 0x613: 0x3aa8, 0x614: 0x392e, 0x615: 0x3abd, 0x616: 0x3927, 0x617: 0x3ab6, + 0x618: 0x3989, 0x619: 0x3b18, 0x61a: 0x396d, 0x61b: 0x3afc, 0x61c: 0x3966, 0x61d: 0x3af5, + 0x61e: 0x397b, 0x61f: 0x3b0a, 0x620: 0x3974, 0x621: 0x3b03, 0x622: 0x3982, 0x623: 0x3b11, + 0x624: 0x31e5, 0x625: 0x34fb, 0x626: 0x31c7, 0x627: 0x34dd, 0x628: 0x39e4, 0x629: 0x3b73, + 0x62a: 0x39dd, 0x62b: 0x3b6c, 0x62c: 0x39f2, 0x62d: 0x3b81, 0x62e: 0x39eb, 0x62f: 0x3b7a, + 0x630: 0x39f9, 0x631: 0x3b88, 0x632: 0x3230, 0x633: 0x354b, 0x634: 0x3258, 0x635: 0x3578, + 0x636: 0x3253, 0x637: 0x356e, 0x638: 0x323f, 0x639: 0x355a, + // Block 0x19, offset 0x640 + 0x640: 0x4804, 0x641: 0x480a, 0x642: 0x491e, 0x643: 0x4936, 0x644: 0x4926, 0x645: 0x493e, + 0x646: 0x492e, 0x647: 0x4946, 0x648: 0x47aa, 0x649: 0x47b0, 0x64a: 0x488e, 0x64b: 0x48a6, + 0x64c: 0x4896, 0x64d: 0x48ae, 0x64e: 0x489e, 0x64f: 0x48b6, 0x650: 0x4816, 0x651: 0x481c, + 0x652: 0x3db8, 0x653: 0x3dc8, 0x654: 0x3dc0, 0x655: 0x3dd0, + 0x658: 0x47b6, 
0x659: 0x47bc, 0x65a: 0x3ce8, 0x65b: 0x3cf8, 0x65c: 0x3cf0, 0x65d: 0x3d00, + 0x660: 0x482e, 0x661: 0x4834, 0x662: 0x494e, 0x663: 0x4966, + 0x664: 0x4956, 0x665: 0x496e, 0x666: 0x495e, 0x667: 0x4976, 0x668: 0x47c2, 0x669: 0x47c8, + 0x66a: 0x48be, 0x66b: 0x48d6, 0x66c: 0x48c6, 0x66d: 0x48de, 0x66e: 0x48ce, 0x66f: 0x48e6, + 0x670: 0x4846, 0x671: 0x484c, 0x672: 0x3e18, 0x673: 0x3e30, 0x674: 0x3e20, 0x675: 0x3e38, + 0x676: 0x3e28, 0x677: 0x3e40, 0x678: 0x47ce, 0x679: 0x47d4, 0x67a: 0x3d18, 0x67b: 0x3d30, + 0x67c: 0x3d20, 0x67d: 0x3d38, 0x67e: 0x3d28, 0x67f: 0x3d40, + // Block 0x1a, offset 0x680 + 0x680: 0x4852, 0x681: 0x4858, 0x682: 0x3e48, 0x683: 0x3e58, 0x684: 0x3e50, 0x685: 0x3e60, + 0x688: 0x47da, 0x689: 0x47e0, 0x68a: 0x3d48, 0x68b: 0x3d58, + 0x68c: 0x3d50, 0x68d: 0x3d60, 0x690: 0x4864, 0x691: 0x486a, + 0x692: 0x3e80, 0x693: 0x3e98, 0x694: 0x3e88, 0x695: 0x3ea0, 0x696: 0x3e90, 0x697: 0x3ea8, + 0x699: 0x47e6, 0x69b: 0x3d68, 0x69d: 0x3d70, + 0x69f: 0x3d78, 0x6a0: 0x487c, 0x6a1: 0x4882, 0x6a2: 0x497e, 0x6a3: 0x4996, + 0x6a4: 0x4986, 0x6a5: 0x499e, 0x6a6: 0x498e, 0x6a7: 0x49a6, 0x6a8: 0x47ec, 0x6a9: 0x47f2, + 0x6aa: 0x48ee, 0x6ab: 0x4906, 0x6ac: 0x48f6, 0x6ad: 0x490e, 0x6ae: 0x48fe, 0x6af: 0x4916, + 0x6b0: 0x47f8, 0x6b1: 0x431e, 0x6b2: 0x3691, 0x6b3: 0x4324, 0x6b4: 0x4822, 0x6b5: 0x432a, + 0x6b6: 0x36a3, 0x6b7: 0x4330, 0x6b8: 0x36c1, 0x6b9: 0x4336, 0x6ba: 0x36d9, 0x6bb: 0x433c, + 0x6bc: 0x4870, 0x6bd: 0x4342, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3da0, 0x6c1: 0x3da8, 0x6c2: 0x4184, 0x6c3: 0x41a2, 0x6c4: 0x418e, 0x6c5: 0x41ac, + 0x6c6: 0x4198, 0x6c7: 0x41b6, 0x6c8: 0x3cd8, 0x6c9: 0x3ce0, 0x6ca: 0x40d0, 0x6cb: 0x40ee, + 0x6cc: 0x40da, 0x6cd: 0x40f8, 0x6ce: 0x40e4, 0x6cf: 0x4102, 0x6d0: 0x3de8, 0x6d1: 0x3df0, + 0x6d2: 0x41c0, 0x6d3: 0x41de, 0x6d4: 0x41ca, 0x6d5: 0x41e8, 0x6d6: 0x41d4, 0x6d7: 0x41f2, + 0x6d8: 0x3d08, 0x6d9: 0x3d10, 0x6da: 0x410c, 0x6db: 0x412a, 0x6dc: 0x4116, 0x6dd: 0x4134, + 0x6de: 0x4120, 0x6df: 0x413e, 0x6e0: 0x3ec0, 0x6e1: 0x3ec8, 0x6e2: 0x41fc, 0x6e3: 0x421a, + 0x6e4: 0x4206, 0x6e5: 0x4224, 0x6e6: 0x4210, 0x6e7: 0x422e, 0x6e8: 0x3d80, 0x6e9: 0x3d88, + 0x6ea: 0x4148, 0x6eb: 0x4166, 0x6ec: 0x4152, 0x6ed: 0x4170, 0x6ee: 0x415c, 0x6ef: 0x417a, + 0x6f0: 0x3685, 0x6f1: 0x367f, 0x6f2: 0x3d90, 0x6f3: 0x368b, 0x6f4: 0x3d98, + 0x6f6: 0x4810, 0x6f7: 0x3db0, 0x6f8: 0x35f5, 0x6f9: 0x35ef, 0x6fa: 0x35e3, 0x6fb: 0x42ee, + 0x6fc: 0x35fb, 0x6fd: 0x4287, 0x6fe: 0x01d3, 0x6ff: 0x4287, + // Block 0x1c, offset 0x700 + 0x700: 0x42a0, 0x701: 0x4482, 0x702: 0x3dd8, 0x703: 0x369d, 0x704: 0x3de0, + 0x706: 0x483a, 0x707: 0x3df8, 0x708: 0x3601, 0x709: 0x42f4, 0x70a: 0x360d, 0x70b: 0x42fa, + 0x70c: 0x3619, 0x70d: 0x4489, 0x70e: 0x4490, 0x70f: 0x4497, 0x710: 0x36b5, 0x711: 0x36af, + 0x712: 0x3e00, 0x713: 0x44e4, 0x716: 0x36bb, 0x717: 0x3e10, + 0x718: 0x3631, 0x719: 0x362b, 0x71a: 0x361f, 0x71b: 0x4300, 0x71d: 0x449e, + 0x71e: 0x44a5, 0x71f: 0x44ac, 0x720: 0x36eb, 0x721: 0x36e5, 0x722: 0x3e68, 0x723: 0x44ec, + 0x724: 0x36cd, 0x725: 0x36d3, 0x726: 0x36f1, 0x727: 0x3e78, 0x728: 0x3661, 0x729: 0x365b, + 0x72a: 0x364f, 0x72b: 0x430c, 0x72c: 0x3649, 0x72d: 0x4474, 0x72e: 0x447b, 0x72f: 0x0081, + 0x732: 0x3eb0, 0x733: 0x36f7, 0x734: 0x3eb8, + 0x736: 0x4888, 0x737: 0x3ed0, 0x738: 0x363d, 0x739: 0x4306, 0x73a: 0x366d, 0x73b: 0x4318, + 0x73c: 0x3679, 0x73d: 0x425a, 0x73e: 0x428c, + // Block 0x1d, offset 0x740 + 0x740: 0x1bd8, 0x741: 0x1bdc, 0x742: 0x0047, 0x743: 0x1c54, 0x745: 0x1be8, + 0x746: 0x1bec, 0x747: 0x00e9, 0x749: 0x1c58, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 
0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x198d, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x199f, 0x761: 0x1bc8, 0x762: 0x19a8, + 0x764: 0x0075, 0x766: 0x01b8, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x42d2, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0215, + 0x776: 0x0218, 0x777: 0x021b, 0x778: 0x021e, 0x779: 0x0093, 0x77b: 0x1b98, + 0x77c: 0x01e8, 0x77d: 0x01c1, 0x77e: 0x0179, 0x77f: 0x01a0, + // Block 0x1e, offset 0x780 + 0x780: 0x0463, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x222e, 0x791: 0x223a, + 0x792: 0x22ee, 0x793: 0x2216, 0x794: 0x229a, 0x795: 0x2222, 0x796: 0x22a0, 0x797: 0x22b8, + 0x798: 0x22c4, 0x799: 0x2228, 0x79a: 0x22ca, 0x79b: 0x2234, 0x79c: 0x22be, 0x79d: 0x22d0, + 0x79e: 0x22d6, 0x79f: 0x1cbc, 0x7a0: 0x0053, 0x7a1: 0x195a, 0x7a2: 0x1ba4, 0x7a3: 0x1963, + 0x7a4: 0x006d, 0x7a5: 0x19ab, 0x7a6: 0x1bd0, 0x7a7: 0x1d48, 0x7a8: 0x1966, 0x7a9: 0x0071, + 0x7aa: 0x19b7, 0x7ab: 0x1bd4, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x19e4, 0x7b2: 0x1c18, 0x7b3: 0x19ed, 0x7b4: 0x00ad, 0x7b5: 0x1a62, + 0x7b6: 0x1c4c, 0x7b7: 0x1d5c, 0x7b8: 0x19f0, 0x7b9: 0x00b1, 0x7ba: 0x1a65, 0x7bb: 0x1c50, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3c06, 0x7c3: 0xa000, 0x7c4: 0x3c0d, 0x7c5: 0xa000, + 0x7c7: 0x3c14, 0x7c8: 0xa000, 0x7c9: 0x3c1b, + 0x7cd: 0xa000, + 0x7e0: 0x2f65, 0x7e1: 0xa000, 0x7e2: 0x3c29, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3c22, 0x7ee: 0x2f60, 0x7ef: 0x2f6a, + 0x7f0: 0x3c30, 0x7f1: 0x3c37, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c3e, 0x7f5: 0x3c45, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c4c, 0x7f9: 0x3c53, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3c5a, 0x801: 0x3c61, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c76, 0x805: 0x3c7d, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c84, 0x809: 0x3c8b, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3ca0, 0x82d: 0x3ca7, 0x82e: 0x3cae, 0x82f: 0x3cb5, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1882, + 0x86a: 0x1885, 0x86b: 0x1888, 0x86c: 0x188b, 0x86d: 0x188e, 0x86e: 0x1891, 0x86f: 0x1894, + 0x870: 0x1897, 0x871: 0x189a, 0x872: 0x189d, 0x873: 0x18a6, 0x874: 0x1a68, 0x875: 0x1a6c, + 0x876: 0x1a70, 0x877: 0x1a74, 0x878: 0x1a78, 0x879: 0x1a7c, 0x87a: 0x1a80, 0x87b: 0x1a84, + 0x87c: 0x1a88, 0x87d: 0x1c80, 0x87e: 0x1c85, 0x87f: 0x1c8a, + // Block 0x22, offset 0x880 + 0x880: 0x1c8f, 0x881: 0x1c94, 0x882: 0x1c99, 0x883: 0x1c9e, 0x884: 0x1ca3, 0x885: 0x1ca8, + 0x886: 0x1cad, 0x887: 0x1cb2, 0x888: 0x187f, 0x889: 0x18a3, 0x88a: 0x18c7, 0x88b: 0x18eb, + 0x88c: 0x190f, 0x88d: 0x1918, 0x88e: 0x191e, 0x88f: 0x1924, 0x890: 0x192a, 0x891: 0x1b60, + 0x892: 0x1b64, 0x893: 0x1b68, 0x894: 0x1b6c, 0x895: 0x1b70, 0x896: 0x1b74, 0x897: 0x1b78, + 0x898: 0x1b7c, 0x899: 0x1b80, 0x89a: 0x1b84, 0x89b: 0x1b88, 0x89c: 0x1af4, 0x89d: 0x1af8, + 0x89e: 0x1afc, 0x89f: 0x1b00, 0x8a0: 0x1b04, 0x8a1: 0x1b08, 0x8a2: 0x1b0c, 0x8a3: 0x1b10, + 0x8a4: 0x1b14, 0x8a5: 0x1b18, 0x8a6: 0x1b1c, 0x8a7: 0x1b20, 0x8a8: 0x1b24, 0x8a9: 0x1b28, + 0x8aa: 0x1b2c, 
0x8ab: 0x1b30, 0x8ac: 0x1b34, 0x8ad: 0x1b38, 0x8ae: 0x1b3c, 0x8af: 0x1b40, + 0x8b0: 0x1b44, 0x8b1: 0x1b48, 0x8b2: 0x1b4c, 0x8b3: 0x1b50, 0x8b4: 0x1b54, 0x8b5: 0x1b58, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x06bf, 0x8c1: 0x06e3, 0x8c2: 0x06ef, 0x8c3: 0x06ff, 0x8c4: 0x0707, 0x8c5: 0x0713, + 0x8c6: 0x071b, 0x8c7: 0x0723, 0x8c8: 0x072f, 0x8c9: 0x0783, 0x8ca: 0x079b, 0x8cb: 0x07ab, + 0x8cc: 0x07bb, 0x8cd: 0x07cb, 0x8ce: 0x07db, 0x8cf: 0x07fb, 0x8d0: 0x07ff, 0x8d1: 0x0803, + 0x8d2: 0x0837, 0x8d3: 0x085f, 0x8d4: 0x086f, 0x8d5: 0x0877, 0x8d6: 0x087b, 0x8d7: 0x0887, + 0x8d8: 0x08a3, 0x8d9: 0x08a7, 0x8da: 0x08bf, 0x8db: 0x08c3, 0x8dc: 0x08cb, 0x8dd: 0x08db, + 0x8de: 0x0977, 0x8df: 0x098b, 0x8e0: 0x09cb, 0x8e1: 0x09df, 0x8e2: 0x09e7, 0x8e3: 0x09eb, + 0x8e4: 0x09fb, 0x8e5: 0x0a17, 0x8e6: 0x0a43, 0x8e7: 0x0a4f, 0x8e8: 0x0a6f, 0x8e9: 0x0a7b, + 0x8ea: 0x0a7f, 0x8eb: 0x0a83, 0x8ec: 0x0a9b, 0x8ed: 0x0a9f, 0x8ee: 0x0acb, 0x8ef: 0x0ad7, + 0x8f0: 0x0adf, 0x8f1: 0x0ae7, 0x8f2: 0x0af7, 0x8f3: 0x0aff, 0x8f4: 0x0b07, 0x8f5: 0x0b33, + 0x8f6: 0x0b37, 0x8f7: 0x0b3f, 0x8f8: 0x0b43, 0x8f9: 0x0b4b, 0x8fa: 0x0b53, 0x8fb: 0x0b63, + 0x8fc: 0x0b7f, 0x8fd: 0x0bf7, 0x8fe: 0x0c0b, 0x8ff: 0x0c0f, + // Block 0x24, offset 0x900 + 0x900: 0x0c8f, 0x901: 0x0c93, 0x902: 0x0ca7, 0x903: 0x0cab, 0x904: 0x0cb3, 0x905: 0x0cbb, + 0x906: 0x0cc3, 0x907: 0x0ccf, 0x908: 0x0cf7, 0x909: 0x0d07, 0x90a: 0x0d1b, 0x90b: 0x0d8b, + 0x90c: 0x0d97, 0x90d: 0x0da7, 0x90e: 0x0db3, 0x90f: 0x0dbf, 0x910: 0x0dc7, 0x911: 0x0dcb, + 0x912: 0x0dcf, 0x913: 0x0dd3, 0x914: 0x0dd7, 0x915: 0x0e8f, 0x916: 0x0ed7, 0x917: 0x0ee3, + 0x918: 0x0ee7, 0x919: 0x0eeb, 0x91a: 0x0eef, 0x91b: 0x0ef7, 0x91c: 0x0efb, 0x91d: 0x0f0f, + 0x91e: 0x0f2b, 0x91f: 0x0f33, 0x920: 0x0f73, 0x921: 0x0f77, 0x922: 0x0f7f, 0x923: 0x0f83, + 0x924: 0x0f8b, 0x925: 0x0f8f, 0x926: 0x0fb3, 0x927: 0x0fb7, 0x928: 0x0fd3, 0x929: 0x0fd7, + 0x92a: 0x0fdb, 0x92b: 0x0fdf, 0x92c: 0x0ff3, 0x92d: 0x1017, 0x92e: 0x101b, 0x92f: 0x101f, + 0x930: 0x1043, 0x931: 0x1083, 0x932: 0x1087, 0x933: 0x10a7, 0x934: 0x10b7, 0x935: 0x10bf, + 0x936: 0x10df, 0x937: 0x1103, 0x938: 0x1147, 0x939: 0x114f, 0x93a: 0x1163, 0x93b: 0x116f, + 0x93c: 0x1177, 0x93d: 0x117f, 0x93e: 0x1183, 0x93f: 0x1187, + // Block 0x25, offset 0x940 + 0x940: 0x119f, 0x941: 0x11a3, 0x942: 0x11bf, 0x943: 0x11c7, 0x944: 0x11cf, 0x945: 0x11d3, + 0x946: 0x11df, 0x947: 0x11e7, 0x948: 0x11eb, 0x949: 0x11ef, 0x94a: 0x11f7, 0x94b: 0x11fb, + 0x94c: 0x129b, 0x94d: 0x12af, 0x94e: 0x12e3, 0x94f: 0x12e7, 0x950: 0x12ef, 0x951: 0x131b, + 0x952: 0x1323, 0x953: 0x132b, 0x954: 0x1333, 0x955: 0x136f, 0x956: 0x1373, 0x957: 0x137b, + 0x958: 0x137f, 0x959: 0x1383, 0x95a: 0x13af, 0x95b: 0x13b3, 0x95c: 0x13bb, 0x95d: 0x13cf, + 0x95e: 0x13d3, 0x95f: 0x13ef, 0x960: 0x13f7, 0x961: 0x13fb, 0x962: 0x141f, 0x963: 0x143f, + 0x964: 0x1453, 0x965: 0x1457, 0x966: 0x145f, 0x967: 0x148b, 0x968: 0x148f, 0x969: 0x149f, + 0x96a: 0x14c3, 0x96b: 0x14cf, 0x96c: 0x14df, 0x96d: 0x14f7, 0x96e: 0x14ff, 0x96f: 0x1503, + 0x970: 0x1507, 0x971: 0x150b, 0x972: 0x1517, 0x973: 0x151b, 0x974: 0x1523, 0x975: 0x153f, + 0x976: 0x1543, 0x977: 0x1547, 0x978: 0x155f, 0x979: 0x1563, 0x97a: 0x156b, 0x97b: 0x157f, + 0x97c: 0x1583, 0x97d: 0x1587, 0x97e: 0x158f, 0x97f: 0x1593, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x3f08, 0x98d: 0xa000, 0x98e: 0x3f10, 0x98f: 0xa000, 0x990: 0x3f18, 0x991: 0xa000, + 0x992: 0x3f20, 0x993: 0xa000, 0x994: 
0x3f28, 0x995: 0xa000, 0x996: 0x3f30, 0x997: 0xa000, + 0x998: 0x3f38, 0x999: 0xa000, 0x99a: 0x3f40, 0x99b: 0xa000, 0x99c: 0x3f48, 0x99d: 0xa000, + 0x99e: 0x3f50, 0x99f: 0xa000, 0x9a0: 0x3f58, 0x9a1: 0xa000, 0x9a2: 0x3f60, + 0x9a4: 0xa000, 0x9a5: 0x3f68, 0x9a6: 0xa000, 0x9a7: 0x3f70, 0x9a8: 0xa000, 0x9a9: 0x3f78, + 0x9af: 0xa000, + 0x9b0: 0x3f80, 0x9b1: 0x3f88, 0x9b2: 0xa000, 0x9b3: 0x3f90, 0x9b4: 0x3f98, 0x9b5: 0xa000, + 0x9b6: 0x3fa0, 0x9b7: 0x3fa8, 0x9b8: 0xa000, 0x9b9: 0x3fb0, 0x9ba: 0x3fb8, 0x9bb: 0xa000, + 0x9bc: 0x3fc0, 0x9bd: 0x3fc8, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x3f00, + 0x9d9: 0x9903, 0x9da: 0x9903, 0x9db: 0x42dc, 0x9dc: 0x42e2, 0x9dd: 0xa000, + 0x9de: 0x3fd0, 0x9df: 0x26b4, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x3fe0, 0x9ed: 0xa000, 0x9ee: 0x3fe8, 0x9ef: 0xa000, + 0x9f0: 0x3ff0, 0x9f1: 0xa000, 0x9f2: 0x3ff8, 0x9f3: 0xa000, 0x9f4: 0x4000, 0x9f5: 0xa000, + 0x9f6: 0x4008, 0x9f7: 0xa000, 0x9f8: 0x4010, 0x9f9: 0xa000, 0x9fa: 0x4018, 0x9fb: 0xa000, + 0x9fc: 0x4020, 0x9fd: 0xa000, 0x9fe: 0x4028, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4030, 0xa01: 0xa000, 0xa02: 0x4038, 0xa04: 0xa000, 0xa05: 0x4040, + 0xa06: 0xa000, 0xa07: 0x4048, 0xa08: 0xa000, 0xa09: 0x4050, + 0xa0f: 0xa000, 0xa10: 0x4058, 0xa11: 0x4060, + 0xa12: 0xa000, 0xa13: 0x4068, 0xa14: 0x4070, 0xa15: 0xa000, 0xa16: 0x4078, 0xa17: 0x4080, + 0xa18: 0xa000, 0xa19: 0x4088, 0xa1a: 0x4090, 0xa1b: 0xa000, 0xa1c: 0x4098, 0xa1d: 0x40a0, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fd8, + 0xa37: 0x40a8, 0xa38: 0x40b0, 0xa39: 0x40b8, 0xa3a: 0x40c0, + 0xa3d: 0xa000, 0xa3e: 0x40c8, 0xa3f: 0x26c9, + // Block 0x29, offset 0xa40 + 0xa40: 0x0367, 0xa41: 0x032b, 0xa42: 0x032f, 0xa43: 0x0333, 0xa44: 0x037b, 0xa45: 0x0337, + 0xa46: 0x033b, 0xa47: 0x033f, 0xa48: 0x0343, 0xa49: 0x0347, 0xa4a: 0x034b, 0xa4b: 0x034f, + 0xa4c: 0x0353, 0xa4d: 0x0357, 0xa4e: 0x035b, 0xa4f: 0x49bd, 0xa50: 0x49c3, 0xa51: 0x49c9, + 0xa52: 0x49cf, 0xa53: 0x49d5, 0xa54: 0x49db, 0xa55: 0x49e1, 0xa56: 0x49e7, 0xa57: 0x49ed, + 0xa58: 0x49f3, 0xa59: 0x49f9, 0xa5a: 0x49ff, 0xa5b: 0x4a05, 0xa5c: 0x4a0b, 0xa5d: 0x4a11, + 0xa5e: 0x4a17, 0xa5f: 0x4a1d, 0xa60: 0x4a23, 0xa61: 0x4a29, 0xa62: 0x4a2f, 0xa63: 0x4a35, + 0xa64: 0x03c3, 0xa65: 0x035f, 0xa66: 0x0363, 0xa67: 0x03e7, 0xa68: 0x03eb, 0xa69: 0x03ef, + 0xa6a: 0x03f3, 0xa6b: 0x03f7, 0xa6c: 0x03fb, 0xa6d: 0x03ff, 0xa6e: 0x036b, 0xa6f: 0x0403, + 0xa70: 0x0407, 0xa71: 0x036f, 0xa72: 0x0373, 0xa73: 0x0377, 0xa74: 0x037f, 0xa75: 0x0383, + 0xa76: 0x0387, 0xa77: 0x038b, 0xa78: 0x038f, 0xa79: 0x0393, 0xa7a: 0x0397, 0xa7b: 0x039b, + 0xa7c: 0x039f, 0xa7d: 0x03a3, 0xa7e: 0x03a7, 0xa7f: 0x03ab, + // Block 0x2a, offset 0xa80 + 0xa80: 0x03af, 0xa81: 0x03b3, 0xa82: 0x040b, 0xa83: 0x040f, 0xa84: 0x03b7, 0xa85: 0x03bb, + 0xa86: 0x03bf, 0xa87: 0x03c7, 0xa88: 0x03cb, 0xa89: 0x03cf, 0xa8a: 0x03d3, 0xa8b: 0x03d7, + 0xa8c: 0x03db, 0xa8d: 0x03df, 0xa8e: 0x03e3, + 0xa92: 0x06bf, 0xa93: 0x071b, 0xa94: 0x06cb, 0xa95: 0x097b, 0xa96: 0x06cf, 0xa97: 0x06e7, + 0xa98: 0x06d3, 0xa99: 0x0f93, 0xa9a: 0x0707, 0xa9b: 0x06db, 0xa9c: 0x06c3, 0xa9d: 0x09ff, + 0xa9e: 0x098f, 0xa9f: 0x072f, + // Block 0x2b, offset 0xac0 + 0xac0: 0x2054, 0xac1: 0x205a, 0xac2: 0x2060, 0xac3: 0x2066, 0xac4: 0x206c, 0xac5: 0x2072, + 0xac6: 0x2078, 0xac7: 0x207e, 0xac8: 0x2084, 0xac9: 0x208a, 0xaca: 0x2090, 0xacb: 0x2096, + 0xacc: 0x209c, 0xacd: 0x20a2, 0xace: 0x2726, 0xacf: 0x272f, 0xad0: 0x2738, 0xad1: 0x2741, + 0xad2: 0x274a, 0xad3: 0x2753, 0xad4: 0x275c, 0xad5: 0x2765, 0xad6: 0x276e, 0xad7: 0x2780, + 0xad8: 0x2789, 0xad9: 0x2792, 
0xada: 0x279b, 0xadb: 0x27a4, 0xadc: 0x2777, 0xadd: 0x2bac, + 0xade: 0x2aed, 0xae0: 0x20a8, 0xae1: 0x20c0, 0xae2: 0x20b4, 0xae3: 0x2108, + 0xae4: 0x20c6, 0xae5: 0x20e4, 0xae6: 0x20ae, 0xae7: 0x20de, 0xae8: 0x20ba, 0xae9: 0x20f0, + 0xaea: 0x2120, 0xaeb: 0x213e, 0xaec: 0x2138, 0xaed: 0x212c, 0xaee: 0x217a, 0xaef: 0x210e, + 0xaf0: 0x211a, 0xaf1: 0x2132, 0xaf2: 0x2126, 0xaf3: 0x2150, 0xaf4: 0x20fc, 0xaf5: 0x2144, + 0xaf6: 0x216e, 0xaf7: 0x2156, 0xaf8: 0x20ea, 0xaf9: 0x20cc, 0xafa: 0x2102, 0xafb: 0x2114, + 0xafc: 0x214a, 0xafd: 0x20d2, 0xafe: 0x2174, 0xaff: 0x20f6, + // Block 0x2c, offset 0xb00 + 0xb00: 0x215c, 0xb01: 0x20d8, 0xb02: 0x2162, 0xb03: 0x2168, 0xb04: 0x092f, 0xb05: 0x0b03, + 0xb06: 0x0ca7, 0xb07: 0x10c7, + 0xb10: 0x1bc4, 0xb11: 0x18a9, + 0xb12: 0x18ac, 0xb13: 0x18af, 0xb14: 0x18b2, 0xb15: 0x18b5, 0xb16: 0x18b8, 0xb17: 0x18bb, + 0xb18: 0x18be, 0xb19: 0x18c1, 0xb1a: 0x18ca, 0xb1b: 0x18cd, 0xb1c: 0x18d0, 0xb1d: 0x18d3, + 0xb1e: 0x18d6, 0xb1f: 0x18d9, 0xb20: 0x0313, 0xb21: 0x031b, 0xb22: 0x031f, 0xb23: 0x0327, + 0xb24: 0x032b, 0xb25: 0x032f, 0xb26: 0x0337, 0xb27: 0x033f, 0xb28: 0x0343, 0xb29: 0x034b, + 0xb2a: 0x034f, 0xb2b: 0x0353, 0xb2c: 0x0357, 0xb2d: 0x035b, 0xb2e: 0x2e18, 0xb2f: 0x2e20, + 0xb30: 0x2e28, 0xb31: 0x2e30, 0xb32: 0x2e38, 0xb33: 0x2e40, 0xb34: 0x2e48, 0xb35: 0x2e50, + 0xb36: 0x2e60, 0xb37: 0x2e68, 0xb38: 0x2e70, 0xb39: 0x2e78, 0xb3a: 0x2e80, 0xb3b: 0x2e88, + 0xb3c: 0x2ed3, 0xb3d: 0x2e9b, 0xb3e: 0x2e58, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06bf, 0xb41: 0x071b, 0xb42: 0x06cb, 0xb43: 0x097b, 0xb44: 0x071f, 0xb45: 0x07af, + 0xb46: 0x06c7, 0xb47: 0x07ab, 0xb48: 0x070b, 0xb49: 0x0887, 0xb4a: 0x0d07, 0xb4b: 0x0e8f, + 0xb4c: 0x0dd7, 0xb4d: 0x0d1b, 0xb4e: 0x145f, 0xb4f: 0x098b, 0xb50: 0x0ccf, 0xb51: 0x0d4b, + 0xb52: 0x0d0b, 0xb53: 0x104b, 0xb54: 0x08fb, 0xb55: 0x0f03, 0xb56: 0x1387, 0xb57: 0x105f, + 0xb58: 0x0843, 0xb59: 0x108f, 0xb5a: 0x0f9b, 0xb5b: 0x0a17, 0xb5c: 0x140f, 0xb5d: 0x077f, + 0xb5e: 0x08ab, 0xb5f: 0x0df7, 0xb60: 0x1527, 0xb61: 0x0743, 0xb62: 0x07d3, 0xb63: 0x0d9b, + 0xb64: 0x06cf, 0xb65: 0x06e7, 0xb66: 0x06d3, 0xb67: 0x0adb, 0xb68: 0x08ef, 0xb69: 0x087f, + 0xb6a: 0x0a57, 0xb6b: 0x0a4b, 0xb6c: 0x0feb, 0xb6d: 0x073f, 0xb6e: 0x139b, 0xb6f: 0x089b, + 0xb70: 0x09f3, 0xb71: 0x18dc, 0xb72: 0x18df, 0xb73: 0x18e2, 0xb74: 0x18e5, 0xb75: 0x18ee, + 0xb76: 0x18f1, 0xb77: 0x18f4, 0xb78: 0x18f7, 0xb79: 0x18fa, 0xb7a: 0x18fd, 0xb7b: 0x1900, + 0xb7c: 0x1903, 0xb7d: 0x1906, 0xb7e: 0x1909, 0xb7f: 0x1912, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1cc6, 0xb81: 0x1cd5, 0xb82: 0x1ce4, 0xb83: 0x1cf3, 0xb84: 0x1d02, 0xb85: 0x1d11, + 0xb86: 0x1d20, 0xb87: 0x1d2f, 0xb88: 0x1d3e, 0xb89: 0x218c, 0xb8a: 0x219e, 0xb8b: 0x21b0, + 0xb8c: 0x1954, 0xb8d: 0x1c04, 0xb8e: 0x19d2, 0xb8f: 0x1ba8, 0xb90: 0x04cb, 0xb91: 0x04d3, + 0xb92: 0x04db, 0xb93: 0x04e3, 0xb94: 0x04eb, 0xb95: 0x04ef, 0xb96: 0x04f3, 0xb97: 0x04f7, + 0xb98: 0x04fb, 0xb99: 0x04ff, 0xb9a: 0x0503, 0xb9b: 0x0507, 0xb9c: 0x050b, 0xb9d: 0x050f, + 0xb9e: 0x0513, 0xb9f: 0x0517, 0xba0: 0x051b, 0xba1: 0x0523, 0xba2: 0x0527, 0xba3: 0x052b, + 0xba4: 0x052f, 0xba5: 0x0533, 0xba6: 0x0537, 0xba7: 0x053b, 0xba8: 0x053f, 0xba9: 0x0543, + 0xbaa: 0x0547, 0xbab: 0x054b, 0xbac: 0x054f, 0xbad: 0x0553, 0xbae: 0x0557, 0xbaf: 0x055b, + 0xbb0: 0x055f, 0xbb1: 0x0563, 0xbb2: 0x0567, 0xbb3: 0x056f, 0xbb4: 0x0577, 0xbb5: 0x057f, + 0xbb6: 0x0583, 0xbb7: 0x0587, 0xbb8: 0x058b, 0xbb9: 0x058f, 0xbba: 0x0593, 0xbbb: 0x0597, + 0xbbc: 0x059b, 0xbbd: 0x059f, 0xbbe: 0x05a3, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2b0c, 0xbc1: 0x29a8, 0xbc2: 0x2b1c, 0xbc3: 0x2880, 0xbc4: 
0x2ee4, 0xbc5: 0x288a, + 0xbc6: 0x2894, 0xbc7: 0x2f28, 0xbc8: 0x29b5, 0xbc9: 0x289e, 0xbca: 0x28a8, 0xbcb: 0x28b2, + 0xbcc: 0x29dc, 0xbcd: 0x29e9, 0xbce: 0x29c2, 0xbcf: 0x29cf, 0xbd0: 0x2ea9, 0xbd1: 0x29f6, + 0xbd2: 0x2a03, 0xbd3: 0x2bbe, 0xbd4: 0x26bb, 0xbd5: 0x2bd1, 0xbd6: 0x2be4, 0xbd7: 0x2b2c, + 0xbd8: 0x2a10, 0xbd9: 0x2bf7, 0xbda: 0x2c0a, 0xbdb: 0x2a1d, 0xbdc: 0x28bc, 0xbdd: 0x28c6, + 0xbde: 0x2eb7, 0xbdf: 0x2a2a, 0xbe0: 0x2b3c, 0xbe1: 0x2ef5, 0xbe2: 0x28d0, 0xbe3: 0x28da, + 0xbe4: 0x2a37, 0xbe5: 0x28e4, 0xbe6: 0x28ee, 0xbe7: 0x26d0, 0xbe8: 0x26d7, 0xbe9: 0x28f8, + 0xbea: 0x2902, 0xbeb: 0x2c1d, 0xbec: 0x2a44, 0xbed: 0x2b4c, 0xbee: 0x2c30, 0xbef: 0x2a51, + 0xbf0: 0x2916, 0xbf1: 0x290c, 0xbf2: 0x2f3c, 0xbf3: 0x2a5e, 0xbf4: 0x2c43, 0xbf5: 0x2920, + 0xbf6: 0x2b5c, 0xbf7: 0x292a, 0xbf8: 0x2a78, 0xbf9: 0x2934, 0xbfa: 0x2a85, 0xbfb: 0x2f06, + 0xbfc: 0x2a6b, 0xbfd: 0x2b6c, 0xbfe: 0x2a92, 0xbff: 0x26de, + // Block 0x30, offset 0xc00 + 0xc00: 0x2f17, 0xc01: 0x293e, 0xc02: 0x2948, 0xc03: 0x2a9f, 0xc04: 0x2952, 0xc05: 0x295c, + 0xc06: 0x2966, 0xc07: 0x2b7c, 0xc08: 0x2aac, 0xc09: 0x26e5, 0xc0a: 0x2c56, 0xc0b: 0x2e90, + 0xc0c: 0x2b8c, 0xc0d: 0x2ab9, 0xc0e: 0x2ec5, 0xc0f: 0x2970, 0xc10: 0x297a, 0xc11: 0x2ac6, + 0xc12: 0x26ec, 0xc13: 0x2ad3, 0xc14: 0x2b9c, 0xc15: 0x26f3, 0xc16: 0x2c69, 0xc17: 0x2984, + 0xc18: 0x1cb7, 0xc19: 0x1ccb, 0xc1a: 0x1cda, 0xc1b: 0x1ce9, 0xc1c: 0x1cf8, 0xc1d: 0x1d07, + 0xc1e: 0x1d16, 0xc1f: 0x1d25, 0xc20: 0x1d34, 0xc21: 0x1d43, 0xc22: 0x2192, 0xc23: 0x21a4, + 0xc24: 0x21b6, 0xc25: 0x21c2, 0xc26: 0x21ce, 0xc27: 0x21da, 0xc28: 0x21e6, 0xc29: 0x21f2, + 0xc2a: 0x21fe, 0xc2b: 0x220a, 0xc2c: 0x2246, 0xc2d: 0x2252, 0xc2e: 0x225e, 0xc2f: 0x226a, + 0xc30: 0x2276, 0xc31: 0x1c14, 0xc32: 0x19c6, 0xc33: 0x1936, 0xc34: 0x1be4, 0xc35: 0x1a47, + 0xc36: 0x1a56, 0xc37: 0x19cc, 0xc38: 0x1bfc, 0xc39: 0x1c00, 0xc3a: 0x1960, 0xc3b: 0x2701, + 0xc3c: 0x270f, 0xc3d: 0x26fa, 0xc3e: 0x2708, 0xc3f: 0x2ae0, + // Block 0x31, offset 0xc40 + 0xc40: 0x1a4a, 0xc41: 0x1a32, 0xc42: 0x1c60, 0xc43: 0x1a1a, 0xc44: 0x19f3, 0xc45: 0x1969, + 0xc46: 0x1978, 0xc47: 0x1948, 0xc48: 0x1bf0, 0xc49: 0x1d52, 0xc4a: 0x1a4d, 0xc4b: 0x1a35, + 0xc4c: 0x1c64, 0xc4d: 0x1c70, 0xc4e: 0x1a26, 0xc4f: 0x19fc, 0xc50: 0x1957, 0xc51: 0x1c1c, + 0xc52: 0x1bb0, 0xc53: 0x1b9c, 0xc54: 0x1bcc, 0xc55: 0x1c74, 0xc56: 0x1a29, 0xc57: 0x19c9, + 0xc58: 0x19ff, 0xc59: 0x19de, 0xc5a: 0x1a41, 0xc5b: 0x1c78, 0xc5c: 0x1a2c, 0xc5d: 0x19c0, + 0xc5e: 0x1a02, 0xc5f: 0x1c3c, 0xc60: 0x1bf4, 0xc61: 0x1a14, 0xc62: 0x1c24, 0xc63: 0x1c40, + 0xc64: 0x1bf8, 0xc65: 0x1a17, 0xc66: 0x1c28, 0xc67: 0x22e8, 0xc68: 0x22fc, 0xc69: 0x1996, + 0xc6a: 0x1c20, 0xc6b: 0x1bb4, 0xc6c: 0x1ba0, 0xc6d: 0x1c48, 0xc6e: 0x2716, 0xc6f: 0x27ad, + 0xc70: 0x1a59, 0xc71: 0x1a44, 0xc72: 0x1c7c, 0xc73: 0x1a2f, 0xc74: 0x1a50, 0xc75: 0x1a38, + 0xc76: 0x1c68, 0xc77: 0x1a1d, 0xc78: 0x19f6, 0xc79: 0x1981, 0xc7a: 0x1a53, 0xc7b: 0x1a3b, + 0xc7c: 0x1c6c, 0xc7d: 0x1a20, 0xc7e: 0x19f9, 0xc7f: 0x1984, + // Block 0x32, offset 0xc80 + 0xc80: 0x1c2c, 0xc81: 0x1bb8, 0xc82: 0x1d4d, 0xc83: 0x1939, 0xc84: 0x19ba, 0xc85: 0x19bd, + 0xc86: 0x22f5, 0xc87: 0x1b94, 0xc88: 0x19c3, 0xc89: 0x194b, 0xc8a: 0x19e1, 0xc8b: 0x194e, + 0xc8c: 0x19ea, 0xc8d: 0x196c, 0xc8e: 0x196f, 0xc8f: 0x1a05, 0xc90: 0x1a0b, 0xc91: 0x1a0e, + 0xc92: 0x1c30, 0xc93: 0x1a11, 0xc94: 0x1a23, 0xc95: 0x1c38, 0xc96: 0x1c44, 0xc97: 0x1990, + 0xc98: 0x1d57, 0xc99: 0x1bbc, 0xc9a: 0x1993, 0xc9b: 0x1a5c, 0xc9c: 0x19a5, 0xc9d: 0x19b4, + 0xc9e: 0x22e2, 0xc9f: 0x22dc, 0xca0: 0x1cc1, 0xca1: 0x1cd0, 0xca2: 0x1cdf, 0xca3: 0x1cee, + 0xca4: 0x1cfd, 0xca5: 0x1d0c, 
0xca6: 0x1d1b, 0xca7: 0x1d2a, 0xca8: 0x1d39, 0xca9: 0x2186, + 0xcaa: 0x2198, 0xcab: 0x21aa, 0xcac: 0x21bc, 0xcad: 0x21c8, 0xcae: 0x21d4, 0xcaf: 0x21e0, + 0xcb0: 0x21ec, 0xcb1: 0x21f8, 0xcb2: 0x2204, 0xcb3: 0x2240, 0xcb4: 0x224c, 0xcb5: 0x2258, + 0xcb6: 0x2264, 0xcb7: 0x2270, 0xcb8: 0x227c, 0xcb9: 0x2282, 0xcba: 0x2288, 0xcbb: 0x228e, + 0xcbc: 0x2294, 0xcbd: 0x22a6, 0xcbe: 0x22ac, 0xcbf: 0x1c10, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x1377, 0xcc1: 0x0cfb, 0xcc2: 0x13d3, 0xcc3: 0x139f, 0xcc4: 0x0e57, 0xcc5: 0x06eb, + 0xcc6: 0x08df, 0xcc7: 0x162b, 0xcc8: 0x162b, 0xcc9: 0x0a0b, 0xcca: 0x145f, 0xccb: 0x0943, + 0xccc: 0x0a07, 0xccd: 0x0bef, 0xcce: 0x0fcf, 0xccf: 0x115f, 0xcd0: 0x1297, 0xcd1: 0x12d3, + 0xcd2: 0x1307, 0xcd3: 0x141b, 0xcd4: 0x0d73, 0xcd5: 0x0dff, 0xcd6: 0x0eab, 0xcd7: 0x0f43, + 0xcd8: 0x125f, 0xcd9: 0x1447, 0xcda: 0x1573, 0xcdb: 0x070f, 0xcdc: 0x08b3, 0xcdd: 0x0d87, + 0xcde: 0x0ecf, 0xcdf: 0x1293, 0xce0: 0x15c3, 0xce1: 0x0ab3, 0xce2: 0x0e77, 0xce3: 0x1283, + 0xce4: 0x1317, 0xce5: 0x0c23, 0xce6: 0x11bb, 0xce7: 0x12df, 0xce8: 0x0b1f, 0xce9: 0x0d0f, + 0xcea: 0x0e17, 0xceb: 0x0f1b, 0xcec: 0x1427, 0xced: 0x074f, 0xcee: 0x07e7, 0xcef: 0x0853, + 0xcf0: 0x0c8b, 0xcf1: 0x0d7f, 0xcf2: 0x0ecb, 0xcf3: 0x0fef, 0xcf4: 0x1177, 0xcf5: 0x128b, + 0xcf6: 0x12a3, 0xcf7: 0x13c7, 0xcf8: 0x14ef, 0xcf9: 0x15a3, 0xcfa: 0x15bf, 0xcfb: 0x102b, + 0xcfc: 0x106b, 0xcfd: 0x1123, 0xcfe: 0x1243, 0xcff: 0x147b, + // Block 0x34, offset 0xd00 + 0xd00: 0x15cb, 0xd01: 0x134b, 0xd02: 0x09c7, 0xd03: 0x0b3b, 0xd04: 0x10db, 0xd05: 0x119b, + 0xd06: 0x0eff, 0xd07: 0x1033, 0xd08: 0x1397, 0xd09: 0x14e7, 0xd0a: 0x09c3, 0xd0b: 0x0a8f, + 0xd0c: 0x0d77, 0xd0d: 0x0e2b, 0xd0e: 0x0e5f, 0xd0f: 0x1113, 0xd10: 0x113b, 0xd11: 0x14a7, + 0xd12: 0x084f, 0xd13: 0x11a7, 0xd14: 0x07f3, 0xd15: 0x07ef, 0xd16: 0x1097, 0xd17: 0x1127, + 0xd18: 0x125b, 0xd19: 0x14af, 0xd1a: 0x1367, 0xd1b: 0x0c27, 0xd1c: 0x0d73, 0xd1d: 0x1357, + 0xd1e: 0x06f7, 0xd1f: 0x0a63, 0xd20: 0x0b93, 0xd21: 0x0f2f, 0xd22: 0x0faf, 0xd23: 0x0873, + 0xd24: 0x103b, 0xd25: 0x075f, 0xd26: 0x0b77, 0xd27: 0x06d7, 0xd28: 0x0deb, 0xd29: 0x0ca3, + 0xd2a: 0x110f, 0xd2b: 0x08c7, 0xd2c: 0x09b3, 0xd2d: 0x0ffb, 0xd2e: 0x1263, 0xd2f: 0x133b, + 0xd30: 0x0db7, 0xd31: 0x13f7, 0xd32: 0x0de3, 0xd33: 0x0c37, 0xd34: 0x121b, 0xd35: 0x0c57, + 0xd36: 0x0fab, 0xd37: 0x072b, 0xd38: 0x07a7, 0xd39: 0x07eb, 0xd3a: 0x0d53, 0xd3b: 0x10fb, + 0xd3c: 0x11f3, 0xd3d: 0x1347, 0xd3e: 0x145b, 0xd3f: 0x085b, + // Block 0x35, offset 0xd40 + 0xd40: 0x090f, 0xd41: 0x0a17, 0xd42: 0x0b2f, 0xd43: 0x0cbf, 0xd44: 0x0e7b, 0xd45: 0x103f, + 0xd46: 0x1497, 0xd47: 0x157b, 0xd48: 0x15cf, 0xd49: 0x15e7, 0xd4a: 0x0837, 0xd4b: 0x0cf3, + 0xd4c: 0x0da3, 0xd4d: 0x13eb, 0xd4e: 0x0afb, 0xd4f: 0x0bd7, 0xd50: 0x0bf3, 0xd51: 0x0c83, + 0xd52: 0x0e6b, 0xd53: 0x0eb7, 0xd54: 0x0f67, 0xd55: 0x108b, 0xd56: 0x112f, 0xd57: 0x1193, + 0xd58: 0x13db, 0xd59: 0x126b, 0xd5a: 0x1403, 0xd5b: 0x147f, 0xd5c: 0x080f, 0xd5d: 0x083b, + 0xd5e: 0x0923, 0xd5f: 0x0ea7, 0xd60: 0x12f3, 0xd61: 0x133b, 0xd62: 0x0b1b, 0xd63: 0x0b8b, + 0xd64: 0x0c4f, 0xd65: 0x0daf, 0xd66: 0x10d7, 0xd67: 0x0f23, 0xd68: 0x073b, 0xd69: 0x097f, + 0xd6a: 0x0a63, 0xd6b: 0x0ac7, 0xd6c: 0x0b97, 0xd6d: 0x0f3f, 0xd6e: 0x0f5b, 0xd6f: 0x116b, + 0xd70: 0x118b, 0xd71: 0x1463, 0xd72: 0x14e3, 0xd73: 0x14f3, 0xd74: 0x152f, 0xd75: 0x0753, + 0xd76: 0x107f, 0xd77: 0x144f, 0xd78: 0x14cb, 0xd79: 0x0baf, 0xd7a: 0x0717, 0xd7b: 0x0777, + 0xd7c: 0x0a67, 0xd7d: 0x0a87, 0xd7e: 0x0caf, 0xd7f: 0x0d73, + // Block 0x36, offset 0xd80 + 0xd80: 0x0ec3, 0xd81: 0x0fcb, 0xd82: 0x1277, 0xd83: 0x1417, 0xd84: 0x1623, 0xd85: 
0x0ce3, + 0xd86: 0x14a3, 0xd87: 0x0833, 0xd88: 0x0d2f, 0xd89: 0x0d3b, 0xd8a: 0x0e0f, 0xd8b: 0x0e47, + 0xd8c: 0x0f4b, 0xd8d: 0x0fa7, 0xd8e: 0x1027, 0xd8f: 0x110b, 0xd90: 0x153b, 0xd91: 0x07af, + 0xd92: 0x0c03, 0xd93: 0x14b3, 0xd94: 0x0767, 0xd95: 0x0aab, 0xd96: 0x0e2f, 0xd97: 0x13df, + 0xd98: 0x0b67, 0xd99: 0x0bb7, 0xd9a: 0x0d43, 0xd9b: 0x0f2f, 0xd9c: 0x14bb, 0xd9d: 0x0817, + 0xd9e: 0x08ff, 0xd9f: 0x0a97, 0xda0: 0x0cd3, 0xda1: 0x0d1f, 0xda2: 0x0d5f, 0xda3: 0x0df3, + 0xda4: 0x0f47, 0xda5: 0x0fbb, 0xda6: 0x1157, 0xda7: 0x12f7, 0xda8: 0x1303, 0xda9: 0x1457, + 0xdaa: 0x14d7, 0xdab: 0x0883, 0xdac: 0x0e4b, 0xdad: 0x0903, 0xdae: 0x0ec7, 0xdaf: 0x0f6b, + 0xdb0: 0x1287, 0xdb1: 0x14bf, 0xdb2: 0x15ab, 0xdb3: 0x15d3, 0xdb4: 0x0d37, 0xdb5: 0x0e27, + 0xdb6: 0x11c3, 0xdb7: 0x10b7, 0xdb8: 0x10c3, 0xdb9: 0x10e7, 0xdba: 0x0f17, 0xdbb: 0x0e9f, + 0xdbc: 0x1363, 0xdbd: 0x0733, 0xdbe: 0x122b, 0xdbf: 0x081b, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x080b, 0xdc1: 0x0b0b, 0xdc2: 0x0c2b, 0xdc3: 0x10f3, 0xdc4: 0x0a53, 0xdc5: 0x0e03, + 0xdc6: 0x0cef, 0xdc7: 0x13e7, 0xdc8: 0x12e7, 0xdc9: 0x14ab, 0xdca: 0x1323, 0xdcb: 0x0b27, + 0xdcc: 0x0787, 0xdcd: 0x095b, 0xdd0: 0x09af, + 0xdd2: 0x0cdf, 0xdd5: 0x07f7, 0xdd6: 0x0f1f, 0xdd7: 0x0fe3, + 0xdd8: 0x1047, 0xdd9: 0x1063, 0xdda: 0x1067, 0xddb: 0x107b, 0xddc: 0x14fb, 0xddd: 0x10eb, + 0xdde: 0x116f, 0xde0: 0x128f, 0xde2: 0x1353, + 0xde5: 0x1407, 0xde6: 0x1433, + 0xdea: 0x154f, 0xdeb: 0x1553, 0xdec: 0x1557, 0xded: 0x15bb, 0xdee: 0x142b, 0xdef: 0x14c7, + 0xdf0: 0x0757, 0xdf1: 0x077b, 0xdf2: 0x078f, 0xdf3: 0x084b, 0xdf4: 0x0857, 0xdf5: 0x0897, + 0xdf6: 0x094b, 0xdf7: 0x0967, 0xdf8: 0x096f, 0xdf9: 0x09ab, 0xdfa: 0x09b7, 0xdfb: 0x0a93, + 0xdfc: 0x0a9b, 0xdfd: 0x0ba3, 0xdfe: 0x0bcb, 0xdff: 0x0bd3, + // Block 0x38, offset 0xe00 + 0xe00: 0x0beb, 0xe01: 0x0c97, 0xe02: 0x0cc7, 0xe03: 0x0ce7, 0xe04: 0x0d57, 0xe05: 0x0e1b, + 0xe06: 0x0e37, 0xe07: 0x0e67, 0xe08: 0x0ebb, 0xe09: 0x0edb, 0xe0a: 0x0f4f, 0xe0b: 0x102f, + 0xe0c: 0x104b, 0xe0d: 0x1053, 0xe0e: 0x104f, 0xe0f: 0x1057, 0xe10: 0x105b, 0xe11: 0x105f, + 0xe12: 0x1073, 0xe13: 0x1077, 0xe14: 0x109b, 0xe15: 0x10af, 0xe16: 0x10cb, 0xe17: 0x112f, + 0xe18: 0x1137, 0xe19: 0x113f, 0xe1a: 0x1153, 0xe1b: 0x117b, 0xe1c: 0x11cb, 0xe1d: 0x11ff, + 0xe1e: 0x11ff, 0xe1f: 0x1267, 0xe20: 0x130f, 0xe21: 0x1327, 0xe22: 0x135b, 0xe23: 0x135f, + 0xe24: 0x13a3, 0xe25: 0x13a7, 0xe26: 0x13ff, 0xe27: 0x1407, 0xe28: 0x14db, 0xe29: 0x151f, + 0xe2a: 0x1537, 0xe2b: 0x0b9b, 0xe2c: 0x171e, 0xe2d: 0x11e3, + 0xe30: 0x06df, 0xe31: 0x07e3, 0xe32: 0x07a3, 0xe33: 0x074b, 0xe34: 0x078b, 0xe35: 0x07b7, + 0xe36: 0x0847, 0xe37: 0x0863, 0xe38: 0x094b, 0xe39: 0x0937, 0xe3a: 0x0947, 0xe3b: 0x0963, + 0xe3c: 0x09af, 0xe3d: 0x09bf, 0xe3e: 0x0a03, 0xe3f: 0x0a0f, + // Block 0x39, offset 0xe40 + 0xe40: 0x0a2b, 0xe41: 0x0a3b, 0xe42: 0x0b23, 0xe43: 0x0b2b, 0xe44: 0x0b5b, 0xe45: 0x0b7b, + 0xe46: 0x0bab, 0xe47: 0x0bc3, 0xe48: 0x0bb3, 0xe49: 0x0bd3, 0xe4a: 0x0bc7, 0xe4b: 0x0beb, + 0xe4c: 0x0c07, 0xe4d: 0x0c5f, 0xe4e: 0x0c6b, 0xe4f: 0x0c73, 0xe50: 0x0c9b, 0xe51: 0x0cdf, + 0xe52: 0x0d0f, 0xe53: 0x0d13, 0xe54: 0x0d27, 0xe55: 0x0da7, 0xe56: 0x0db7, 0xe57: 0x0e0f, + 0xe58: 0x0e5b, 0xe59: 0x0e53, 0xe5a: 0x0e67, 0xe5b: 0x0e83, 0xe5c: 0x0ebb, 0xe5d: 0x1013, + 0xe5e: 0x0edf, 0xe5f: 0x0f13, 0xe60: 0x0f1f, 0xe61: 0x0f5f, 0xe62: 0x0f7b, 0xe63: 0x0f9f, + 0xe64: 0x0fc3, 0xe65: 0x0fc7, 0xe66: 0x0fe3, 0xe67: 0x0fe7, 0xe68: 0x0ff7, 0xe69: 0x100b, + 0xe6a: 0x1007, 0xe6b: 0x1037, 0xe6c: 0x10b3, 0xe6d: 0x10cb, 0xe6e: 0x10e3, 0xe6f: 0x111b, + 0xe70: 0x112f, 0xe71: 0x114b, 0xe72: 0x117b, 0xe73: 0x122f, 0xe74: 0x1257, 
0xe75: 0x12cb, + 0xe76: 0x1313, 0xe77: 0x131f, 0xe78: 0x1327, 0xe79: 0x133f, 0xe7a: 0x1353, 0xe7b: 0x1343, + 0xe7c: 0x135b, 0xe7d: 0x1357, 0xe7e: 0x134f, 0xe7f: 0x135f, + // Block 0x3a, offset 0xe80 + 0xe80: 0x136b, 0xe81: 0x13a7, 0xe82: 0x13e3, 0xe83: 0x1413, 0xe84: 0x144b, 0xe85: 0x146b, + 0xe86: 0x14b7, 0xe87: 0x14db, 0xe88: 0x14fb, 0xe89: 0x150f, 0xe8a: 0x151f, 0xe8b: 0x152b, + 0xe8c: 0x1537, 0xe8d: 0x158b, 0xe8e: 0x162b, 0xe8f: 0x16b5, 0xe90: 0x16b0, 0xe91: 0x16e2, + 0xe92: 0x0607, 0xe93: 0x062f, 0xe94: 0x0633, 0xe95: 0x1764, 0xe96: 0x1791, 0xe97: 0x1809, + 0xe98: 0x1617, 0xe99: 0x1627, + // Block 0x3b, offset 0xec0 + 0xec0: 0x19d5, 0xec1: 0x19d8, 0xec2: 0x19db, 0xec3: 0x1c08, 0xec4: 0x1c0c, 0xec5: 0x1a5f, + 0xec6: 0x1a5f, + 0xed3: 0x1d75, 0xed4: 0x1d66, 0xed5: 0x1d6b, 0xed6: 0x1d7a, 0xed7: 0x1d70, + 0xedd: 0x4390, + 0xede: 0x8115, 0xedf: 0x4402, 0xee0: 0x022d, 0xee1: 0x0215, 0xee2: 0x021e, 0xee3: 0x0221, + 0xee4: 0x0224, 0xee5: 0x0227, 0xee6: 0x022a, 0xee7: 0x0230, 0xee8: 0x0233, 0xee9: 0x0017, + 0xeea: 0x43f0, 0xeeb: 0x43f6, 0xeec: 0x44f4, 0xeed: 0x44fc, 0xeee: 0x4348, 0xeef: 0x434e, + 0xef0: 0x4354, 0xef1: 0x435a, 0xef2: 0x4366, 0xef3: 0x436c, 0xef4: 0x4372, 0xef5: 0x437e, + 0xef6: 0x4384, 0xef8: 0x438a, 0xef9: 0x4396, 0xefa: 0x439c, 0xefb: 0x43a2, + 0xefc: 0x43ae, 0xefe: 0x43b4, + // Block 0x3c, offset 0xf00 + 0xf00: 0x43ba, 0xf01: 0x43c0, 0xf03: 0x43c6, 0xf04: 0x43cc, + 0xf06: 0x43d8, 0xf07: 0x43de, 0xf08: 0x43e4, 0xf09: 0x43ea, 0xf0a: 0x43fc, 0xf0b: 0x4378, + 0xf0c: 0x4360, 0xf0d: 0x43a8, 0xf0e: 0x43d2, 0xf0f: 0x1d7f, 0xf10: 0x0299, 0xf11: 0x0299, + 0xf12: 0x02a2, 0xf13: 0x02a2, 0xf14: 0x02a2, 0xf15: 0x02a2, 0xf16: 0x02a5, 0xf17: 0x02a5, + 0xf18: 0x02a5, 0xf19: 0x02a5, 0xf1a: 0x02ab, 0xf1b: 0x02ab, 0xf1c: 0x02ab, 0xf1d: 0x02ab, + 0xf1e: 0x029f, 0xf1f: 0x029f, 0xf20: 0x029f, 0xf21: 0x029f, 0xf22: 0x02a8, 0xf23: 0x02a8, + 0xf24: 0x02a8, 0xf25: 0x02a8, 0xf26: 0x029c, 0xf27: 0x029c, 0xf28: 0x029c, 0xf29: 0x029c, + 0xf2a: 0x02cf, 0xf2b: 0x02cf, 0xf2c: 0x02cf, 0xf2d: 0x02cf, 0xf2e: 0x02d2, 0xf2f: 0x02d2, + 0xf30: 0x02d2, 0xf31: 0x02d2, 0xf32: 0x02b1, 0xf33: 0x02b1, 0xf34: 0x02b1, 0xf35: 0x02b1, + 0xf36: 0x02ae, 0xf37: 0x02ae, 0xf38: 0x02ae, 0xf39: 0x02ae, 0xf3a: 0x02b4, 0xf3b: 0x02b4, + 0xf3c: 0x02b4, 0xf3d: 0x02b4, 0xf3e: 0x02b7, 0xf3f: 0x02b7, + // Block 0x3d, offset 0xf40 + 0xf40: 0x02b7, 0xf41: 0x02b7, 0xf42: 0x02c0, 0xf43: 0x02c0, 0xf44: 0x02bd, 0xf45: 0x02bd, + 0xf46: 0x02c3, 0xf47: 0x02c3, 0xf48: 0x02ba, 0xf49: 0x02ba, 0xf4a: 0x02c9, 0xf4b: 0x02c9, + 0xf4c: 0x02c6, 0xf4d: 0x02c6, 0xf4e: 0x02d5, 0xf4f: 0x02d5, 0xf50: 0x02d5, 0xf51: 0x02d5, + 0xf52: 0x02db, 0xf53: 0x02db, 0xf54: 0x02db, 0xf55: 0x02db, 0xf56: 0x02e1, 0xf57: 0x02e1, + 0xf58: 0x02e1, 0xf59: 0x02e1, 0xf5a: 0x02de, 0xf5b: 0x02de, 0xf5c: 0x02de, 0xf5d: 0x02de, + 0xf5e: 0x02e4, 0xf5f: 0x02e4, 0xf60: 0x02e7, 0xf61: 0x02e7, 0xf62: 0x02e7, 0xf63: 0x02e7, + 0xf64: 0x446e, 0xf65: 0x446e, 0xf66: 0x02ed, 0xf67: 0x02ed, 0xf68: 0x02ed, 0xf69: 0x02ed, + 0xf6a: 0x02ea, 0xf6b: 0x02ea, 0xf6c: 0x02ea, 0xf6d: 0x02ea, 0xf6e: 0x0308, 0xf6f: 0x0308, + 0xf70: 0x4468, 0xf71: 0x4468, + // Block 0x3e, offset 0xf80 + 0xf93: 0x02d8, 0xf94: 0x02d8, 0xf95: 0x02d8, 0xf96: 0x02d8, 0xf97: 0x02f6, + 0xf98: 0x02f6, 0xf99: 0x02f3, 0xf9a: 0x02f3, 0xf9b: 0x02f9, 0xf9c: 0x02f9, 0xf9d: 0x204f, + 0xf9e: 0x02ff, 0xf9f: 0x02ff, 0xfa0: 0x02f0, 0xfa1: 0x02f0, 0xfa2: 0x02fc, 0xfa3: 0x02fc, + 0xfa4: 0x0305, 0xfa5: 0x0305, 0xfa6: 0x0305, 0xfa7: 0x0305, 0xfa8: 0x028d, 0xfa9: 0x028d, + 0xfaa: 0x25aa, 0xfab: 0x25aa, 0xfac: 0x261a, 0xfad: 0x261a, 0xfae: 0x25e9, 
0xfaf: 0x25e9, + 0xfb0: 0x2605, 0xfb1: 0x2605, 0xfb2: 0x25fe, 0xfb3: 0x25fe, 0xfb4: 0x260c, 0xfb5: 0x260c, + 0xfb6: 0x2613, 0xfb7: 0x2613, 0xfb8: 0x2613, 0xfb9: 0x25f0, 0xfba: 0x25f0, 0xfbb: 0x25f0, + 0xfbc: 0x0302, 0xfbd: 0x0302, 0xfbe: 0x0302, 0xfbf: 0x0302, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x25b1, 0xfc1: 0x25b8, 0xfc2: 0x25d4, 0xfc3: 0x25f0, 0xfc4: 0x25f7, 0xfc5: 0x1d89, + 0xfc6: 0x1d8e, 0xfc7: 0x1d93, 0xfc8: 0x1da2, 0xfc9: 0x1db1, 0xfca: 0x1db6, 0xfcb: 0x1dbb, + 0xfcc: 0x1dc0, 0xfcd: 0x1dc5, 0xfce: 0x1dd4, 0xfcf: 0x1de3, 0xfd0: 0x1de8, 0xfd1: 0x1ded, + 0xfd2: 0x1dfc, 0xfd3: 0x1e0b, 0xfd4: 0x1e10, 0xfd5: 0x1e15, 0xfd6: 0x1e1a, 0xfd7: 0x1e29, + 0xfd8: 0x1e2e, 0xfd9: 0x1e3d, 0xfda: 0x1e42, 0xfdb: 0x1e47, 0xfdc: 0x1e56, 0xfdd: 0x1e5b, + 0xfde: 0x1e60, 0xfdf: 0x1e6a, 0xfe0: 0x1ea6, 0xfe1: 0x1eb5, 0xfe2: 0x1ec4, 0xfe3: 0x1ec9, + 0xfe4: 0x1ece, 0xfe5: 0x1ed8, 0xfe6: 0x1ee7, 0xfe7: 0x1eec, 0xfe8: 0x1efb, 0xfe9: 0x1f00, + 0xfea: 0x1f05, 0xfeb: 0x1f14, 0xfec: 0x1f19, 0xfed: 0x1f28, 0xfee: 0x1f2d, 0xfef: 0x1f32, + 0xff0: 0x1f37, 0xff1: 0x1f3c, 0xff2: 0x1f41, 0xff3: 0x1f46, 0xff4: 0x1f4b, 0xff5: 0x1f50, + 0xff6: 0x1f55, 0xff7: 0x1f5a, 0xff8: 0x1f5f, 0xff9: 0x1f64, 0xffa: 0x1f69, 0xffb: 0x1f6e, + 0xffc: 0x1f73, 0xffd: 0x1f78, 0xffe: 0x1f7d, 0xfff: 0x1f87, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f8c, 0x1001: 0x1f91, 0x1002: 0x1f96, 0x1003: 0x1fa0, 0x1004: 0x1fa5, 0x1005: 0x1faf, + 0x1006: 0x1fb4, 0x1007: 0x1fb9, 0x1008: 0x1fbe, 0x1009: 0x1fc3, 0x100a: 0x1fc8, 0x100b: 0x1fcd, + 0x100c: 0x1fd2, 0x100d: 0x1fd7, 0x100e: 0x1fe6, 0x100f: 0x1ff5, 0x1010: 0x1ffa, 0x1011: 0x1fff, + 0x1012: 0x2004, 0x1013: 0x2009, 0x1014: 0x200e, 0x1015: 0x2018, 0x1016: 0x201d, 0x1017: 0x2022, + 0x1018: 0x2031, 0x1019: 0x2040, 0x101a: 0x2045, 0x101b: 0x4420, 0x101c: 0x4426, 0x101d: 0x445c, + 0x101e: 0x44b3, 0x101f: 0x44ba, 0x1020: 0x44c1, 0x1021: 0x44c8, 0x1022: 0x44cf, 0x1023: 0x44d6, + 0x1024: 0x25c6, 0x1025: 0x25cd, 0x1026: 0x25d4, 0x1027: 0x25db, 0x1028: 0x25f0, 0x1029: 0x25f7, + 0x102a: 0x1d98, 0x102b: 0x1d9d, 0x102c: 0x1da2, 0x102d: 0x1da7, 0x102e: 0x1db1, 0x102f: 0x1db6, + 0x1030: 0x1dca, 0x1031: 0x1dcf, 0x1032: 0x1dd4, 0x1033: 0x1dd9, 0x1034: 0x1de3, 0x1035: 0x1de8, + 0x1036: 0x1df2, 0x1037: 0x1df7, 0x1038: 0x1dfc, 0x1039: 0x1e01, 0x103a: 0x1e0b, 0x103b: 0x1e10, + 0x103c: 0x1f3c, 0x103d: 0x1f41, 0x103e: 0x1f50, 0x103f: 0x1f55, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f5a, 0x1041: 0x1f6e, 0x1042: 0x1f73, 0x1043: 0x1f78, 0x1044: 0x1f7d, 0x1045: 0x1f96, + 0x1046: 0x1fa0, 0x1047: 0x1fa5, 0x1048: 0x1faa, 0x1049: 0x1fbe, 0x104a: 0x1fdc, 0x104b: 0x1fe1, + 0x104c: 0x1fe6, 0x104d: 0x1feb, 0x104e: 0x1ff5, 0x104f: 0x1ffa, 0x1050: 0x445c, 0x1051: 0x2027, + 0x1052: 0x202c, 0x1053: 0x2031, 0x1054: 0x2036, 0x1055: 0x2040, 0x1056: 0x2045, 0x1057: 0x25b1, + 0x1058: 0x25b8, 0x1059: 0x25bf, 0x105a: 0x25d4, 0x105b: 0x25e2, 0x105c: 0x1d89, 0x105d: 0x1d8e, + 0x105e: 0x1d93, 0x105f: 0x1da2, 0x1060: 0x1dac, 0x1061: 0x1dbb, 0x1062: 0x1dc0, 0x1063: 0x1dc5, + 0x1064: 0x1dd4, 0x1065: 0x1dde, 0x1066: 0x1dfc, 0x1067: 0x1e15, 0x1068: 0x1e1a, 0x1069: 0x1e29, + 0x106a: 0x1e2e, 0x106b: 0x1e3d, 0x106c: 0x1e47, 0x106d: 0x1e56, 0x106e: 0x1e5b, 0x106f: 0x1e60, + 0x1070: 0x1e6a, 0x1071: 0x1ea6, 0x1072: 0x1eab, 0x1073: 0x1eb5, 0x1074: 0x1ec4, 0x1075: 0x1ec9, + 0x1076: 0x1ece, 0x1077: 0x1ed8, 0x1078: 0x1ee7, 0x1079: 0x1efb, 0x107a: 0x1f00, 0x107b: 0x1f05, + 0x107c: 0x1f14, 0x107d: 0x1f19, 0x107e: 0x1f28, 0x107f: 0x1f2d, + // Block 0x42, offset 0x1080 + 0x1080: 0x1f32, 0x1081: 0x1f37, 0x1082: 0x1f46, 0x1083: 0x1f4b, 0x1084: 0x1f5f, 0x1085: 
0x1f64, + 0x1086: 0x1f69, 0x1087: 0x1f6e, 0x1088: 0x1f73, 0x1089: 0x1f87, 0x108a: 0x1f8c, 0x108b: 0x1f91, + 0x108c: 0x1f96, 0x108d: 0x1f9b, 0x108e: 0x1faf, 0x108f: 0x1fb4, 0x1090: 0x1fb9, 0x1091: 0x1fbe, + 0x1092: 0x1fcd, 0x1093: 0x1fd2, 0x1094: 0x1fd7, 0x1095: 0x1fe6, 0x1096: 0x1ff0, 0x1097: 0x1fff, + 0x1098: 0x2004, 0x1099: 0x4450, 0x109a: 0x2018, 0x109b: 0x201d, 0x109c: 0x2022, 0x109d: 0x2031, + 0x109e: 0x203b, 0x109f: 0x25d4, 0x10a0: 0x25e2, 0x10a1: 0x1da2, 0x10a2: 0x1dac, 0x10a3: 0x1dd4, + 0x10a4: 0x1dde, 0x10a5: 0x1dfc, 0x10a6: 0x1e06, 0x10a7: 0x1e6a, 0x10a8: 0x1e6f, 0x10a9: 0x1e92, + 0x10aa: 0x1e97, 0x10ab: 0x1f6e, 0x10ac: 0x1f73, 0x10ad: 0x1f96, 0x10ae: 0x1fe6, 0x10af: 0x1ff0, + 0x10b0: 0x2031, 0x10b1: 0x203b, 0x10b2: 0x4504, 0x10b3: 0x450c, 0x10b4: 0x4514, 0x10b5: 0x1ef1, + 0x10b6: 0x1ef6, 0x10b7: 0x1f0a, 0x10b8: 0x1f0f, 0x10b9: 0x1f1e, 0x10ba: 0x1f23, 0x10bb: 0x1e74, + 0x10bc: 0x1e79, 0x10bd: 0x1e9c, 0x10be: 0x1ea1, 0x10bf: 0x1e33, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1e38, 0x10c1: 0x1e1f, 0x10c2: 0x1e24, 0x10c3: 0x1e4c, 0x10c4: 0x1e51, 0x10c5: 0x1eba, + 0x10c6: 0x1ebf, 0x10c7: 0x1edd, 0x10c8: 0x1ee2, 0x10c9: 0x1e7e, 0x10ca: 0x1e83, 0x10cb: 0x1e88, + 0x10cc: 0x1e92, 0x10cd: 0x1e8d, 0x10ce: 0x1e65, 0x10cf: 0x1eb0, 0x10d0: 0x1ed3, 0x10d1: 0x1ef1, + 0x10d2: 0x1ef6, 0x10d3: 0x1f0a, 0x10d4: 0x1f0f, 0x10d5: 0x1f1e, 0x10d6: 0x1f23, 0x10d7: 0x1e74, + 0x10d8: 0x1e79, 0x10d9: 0x1e9c, 0x10da: 0x1ea1, 0x10db: 0x1e33, 0x10dc: 0x1e38, 0x10dd: 0x1e1f, + 0x10de: 0x1e24, 0x10df: 0x1e4c, 0x10e0: 0x1e51, 0x10e1: 0x1eba, 0x10e2: 0x1ebf, 0x10e3: 0x1edd, + 0x10e4: 0x1ee2, 0x10e5: 0x1e7e, 0x10e6: 0x1e83, 0x10e7: 0x1e88, 0x10e8: 0x1e92, 0x10e9: 0x1e8d, + 0x10ea: 0x1e65, 0x10eb: 0x1eb0, 0x10ec: 0x1ed3, 0x10ed: 0x1e7e, 0x10ee: 0x1e83, 0x10ef: 0x1e88, + 0x10f0: 0x1e92, 0x10f1: 0x1e6f, 0x10f2: 0x1e97, 0x10f3: 0x1eec, 0x10f4: 0x1e56, 0x10f5: 0x1e5b, + 0x10f6: 0x1e60, 0x10f7: 0x1e7e, 0x10f8: 0x1e83, 0x10f9: 0x1e88, 0x10fa: 0x1eec, 0x10fb: 0x1efb, + 0x10fc: 0x4408, 0x10fd: 0x4408, + // Block 0x44, offset 0x1100 + 0x1110: 0x2311, 0x1111: 0x2326, + 0x1112: 0x2326, 0x1113: 0x232d, 0x1114: 0x2334, 0x1115: 0x2349, 0x1116: 0x2350, 0x1117: 0x2357, + 0x1118: 0x237a, 0x1119: 0x237a, 0x111a: 0x239d, 0x111b: 0x2396, 0x111c: 0x23b2, 0x111d: 0x23a4, + 0x111e: 0x23ab, 0x111f: 0x23ce, 0x1120: 0x23ce, 0x1121: 0x23c7, 0x1122: 0x23d5, 0x1123: 0x23d5, + 0x1124: 0x23ff, 0x1125: 0x23ff, 0x1126: 0x241b, 0x1127: 0x23e3, 0x1128: 0x23e3, 0x1129: 0x23dc, + 0x112a: 0x23f1, 0x112b: 0x23f1, 0x112c: 0x23f8, 0x112d: 0x23f8, 0x112e: 0x2422, 0x112f: 0x2430, + 0x1130: 0x2430, 0x1131: 0x2437, 0x1132: 0x2437, 0x1133: 0x243e, 0x1134: 0x2445, 0x1135: 0x244c, + 0x1136: 0x2453, 0x1137: 0x2453, 0x1138: 0x245a, 0x1139: 0x2468, 0x113a: 0x2476, 0x113b: 0x246f, + 0x113c: 0x247d, 0x113d: 0x247d, 0x113e: 0x2492, 0x113f: 0x2499, + // Block 0x45, offset 0x1140 + 0x1140: 0x24ca, 0x1141: 0x24d8, 0x1142: 0x24d1, 0x1143: 0x24b5, 0x1144: 0x24b5, 0x1145: 0x24df, + 0x1146: 0x24df, 0x1147: 0x24e6, 0x1148: 0x24e6, 0x1149: 0x2510, 0x114a: 0x2517, 0x114b: 0x251e, + 0x114c: 0x24f4, 0x114d: 0x2502, 0x114e: 0x2525, 0x114f: 0x252c, + 0x1152: 0x24fb, 0x1153: 0x2580, 0x1154: 0x2587, 0x1155: 0x255d, 0x1156: 0x2564, 0x1157: 0x2548, + 0x1158: 0x2548, 0x1159: 0x254f, 0x115a: 0x2579, 0x115b: 0x2572, 0x115c: 0x259c, 0x115d: 0x259c, + 0x115e: 0x230a, 0x115f: 0x231f, 0x1160: 0x2318, 0x1161: 0x2342, 0x1162: 0x233b, 0x1163: 0x2365, + 0x1164: 0x235e, 0x1165: 0x2388, 0x1166: 0x236c, 0x1167: 0x2381, 0x1168: 0x23b9, 0x1169: 0x2406, + 0x116a: 0x23ea, 0x116b: 0x2429, 0x116c: 0x24c3, 
0x116d: 0x24ed, 0x116e: 0x2595, 0x116f: 0x258e, + 0x1170: 0x25a3, 0x1171: 0x253a, 0x1172: 0x24a0, 0x1173: 0x256b, 0x1174: 0x2492, 0x1175: 0x24ca, + 0x1176: 0x2461, 0x1177: 0x24ae, 0x1178: 0x2541, 0x1179: 0x2533, 0x117a: 0x24bc, 0x117b: 0x24a7, + 0x117c: 0x24bc, 0x117d: 0x2541, 0x117e: 0x2373, 0x117f: 0x238f, + // Block 0x46, offset 0x1180 + 0x1180: 0x2509, 0x1181: 0x2484, 0x1182: 0x2303, 0x1183: 0x24a7, 0x1184: 0x244c, 0x1185: 0x241b, + 0x1186: 0x23c0, 0x1187: 0x2556, + 0x11b0: 0x2414, 0x11b1: 0x248b, 0x11b2: 0x27bf, 0x11b3: 0x27b6, 0x11b4: 0x27ec, 0x11b5: 0x27da, + 0x11b6: 0x27c8, 0x11b7: 0x27e3, 0x11b8: 0x27f5, 0x11b9: 0x240d, 0x11ba: 0x2c7c, 0x11bb: 0x2afc, + 0x11bc: 0x27d1, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x0483, + 0x11d2: 0x0487, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04bf, + 0x11d8: 0x04c3, 0x11d9: 0x1b5c, + 0x11e0: 0x8132, 0x11e1: 0x8132, 0x11e2: 0x8132, 0x11e3: 0x8132, + 0x11e4: 0x8132, 0x11e5: 0x8132, 0x11e6: 0x8132, 0x11e7: 0x812d, 0x11e8: 0x812d, 0x11e9: 0x812d, + 0x11ea: 0x812d, 0x11eb: 0x812d, 0x11ec: 0x812d, 0x11ed: 0x812d, 0x11ee: 0x8132, 0x11ef: 0x8132, + 0x11f0: 0x1873, 0x11f1: 0x0443, 0x11f2: 0x043f, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04b7, 0x11fa: 0x04bb, 0x11fb: 0x04ab, + 0x11fc: 0x04af, 0x11fd: 0x0493, 0x11fe: 0x0497, 0x11ff: 0x048b, + // Block 0x48, offset 0x1200 + 0x1200: 0x048f, 0x1201: 0x049b, 0x1202: 0x049f, 0x1203: 0x04a3, 0x1204: 0x04a7, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x4269, 0x120a: 0x4269, 0x120b: 0x4269, + 0x120c: 0x4269, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0483, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x0443, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04b7, + 0x121e: 0x04bb, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x42aa, 0x1231: 0x442c, 0x1232: 0x42af, 0x1234: 0x42b4, + 0x1236: 0x42b9, 0x1237: 0x4432, 0x1238: 0x42be, 0x1239: 0x4438, 0x123a: 0x42c3, 0x123b: 0x443e, + 0x123c: 0x42c8, 0x123d: 0x4444, 0x123e: 0x42cd, 0x123f: 0x444a, + // Block 0x49, offset 0x1240 + 0x1240: 0x0236, 0x1241: 0x440e, 0x1242: 0x440e, 0x1243: 0x4414, 0x1244: 0x4414, 0x1245: 0x4456, + 0x1246: 0x4456, 0x1247: 0x441a, 0x1248: 0x441a, 0x1249: 0x4462, 0x124a: 0x4462, 0x124b: 0x4462, + 0x124c: 0x4462, 0x124d: 0x0239, 0x124e: 0x0239, 0x124f: 0x023c, 0x1250: 0x023c, 0x1251: 0x023c, + 0x1252: 0x023c, 0x1253: 0x023f, 0x1254: 0x023f, 0x1255: 0x0242, 0x1256: 0x0242, 0x1257: 0x0242, + 0x1258: 0x0242, 0x1259: 0x0245, 0x125a: 0x0245, 0x125b: 0x0245, 0x125c: 0x0245, 0x125d: 0x0248, + 0x125e: 0x0248, 0x125f: 0x0248, 0x1260: 0x0248, 0x1261: 0x024b, 0x1262: 0x024b, 0x1263: 0x024b, + 0x1264: 0x024b, 0x1265: 0x024e, 0x1266: 0x024e, 0x1267: 0x024e, 0x1268: 0x024e, 0x1269: 0x0251, + 0x126a: 0x0251, 0x126b: 0x0254, 0x126c: 0x0254, 0x126d: 0x0257, 0x126e: 0x0257, 0x126f: 0x025a, + 0x1270: 0x025a, 0x1271: 0x025d, 0x1272: 0x025d, 0x1273: 0x025d, 0x1274: 0x025d, 0x1275: 0x0260, + 0x1276: 0x0260, 0x1277: 0x0260, 0x1278: 0x0260, 0x1279: 0x0263, 0x127a: 0x0263, 0x127b: 0x0263, + 0x127c: 0x0263, 0x127d: 0x0266, 0x127e: 0x0266, 0x127f: 0x0266, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0266, 0x1281: 0x0269, 0x1282: 0x0269, 0x1283: 0x0269, 0x1284: 0x0269, 0x1285: 0x026c, + 0x1286: 0x026c, 
0x1287: 0x026c, 0x1288: 0x026c, 0x1289: 0x026f, 0x128a: 0x026f, 0x128b: 0x026f, + 0x128c: 0x026f, 0x128d: 0x0272, 0x128e: 0x0272, 0x128f: 0x0272, 0x1290: 0x0272, 0x1291: 0x0275, + 0x1292: 0x0275, 0x1293: 0x0275, 0x1294: 0x0275, 0x1295: 0x0278, 0x1296: 0x0278, 0x1297: 0x0278, + 0x1298: 0x0278, 0x1299: 0x027b, 0x129a: 0x027b, 0x129b: 0x027b, 0x129c: 0x027b, 0x129d: 0x027e, + 0x129e: 0x027e, 0x129f: 0x027e, 0x12a0: 0x027e, 0x12a1: 0x0281, 0x12a2: 0x0281, 0x12a3: 0x0281, + 0x12a4: 0x0281, 0x12a5: 0x0284, 0x12a6: 0x0284, 0x12a7: 0x0284, 0x12a8: 0x0284, 0x12a9: 0x0287, + 0x12aa: 0x0287, 0x12ab: 0x0287, 0x12ac: 0x0287, 0x12ad: 0x028a, 0x12ae: 0x028a, 0x12af: 0x028d, + 0x12b0: 0x028d, 0x12b1: 0x0290, 0x12b2: 0x0290, 0x12b3: 0x0290, 0x12b4: 0x0290, 0x12b5: 0x2e00, + 0x12b6: 0x2e00, 0x12b7: 0x2e08, 0x12b8: 0x2e08, 0x12b9: 0x2e10, 0x12ba: 0x2e10, 0x12bb: 0x1f82, + 0x12bc: 0x1f82, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x0477, 0x12e0: 0x047b, 0x12e1: 0x0487, 0x12e2: 0x049b, 0x12e3: 0x049f, + 0x12e4: 0x0483, 0x12e5: 0x05ab, 0x12e6: 0x05a3, 0x12e7: 0x04c7, 0x12e8: 0x04cf, 0x12e9: 0x04d7, + 0x12ea: 0x04df, 0x12eb: 0x04e7, 0x12ec: 0x056b, 0x12ed: 0x0573, 0x12ee: 0x057b, 0x12ef: 0x051f, + 0x12f0: 0x05af, 0x12f1: 0x04cb, 0x12f2: 0x04d3, 0x12f3: 0x04db, 0x12f4: 0x04e3, 0x12f5: 0x04eb, + 0x12f6: 0x04ef, 0x12f7: 0x04f3, 0x12f8: 0x04f7, 0x12f9: 0x04fb, 0x12fa: 0x04ff, 0x12fb: 0x0503, + 0x12fc: 0x0507, 0x12fd: 0x050b, 0x12fe: 0x050f, 0x12ff: 0x0513, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0517, 0x1301: 0x051b, 0x1302: 0x0523, 0x1303: 0x0527, 0x1304: 0x052b, 0x1305: 0x052f, + 0x1306: 0x0533, 0x1307: 0x0537, 0x1308: 0x053b, 0x1309: 0x053f, 0x130a: 0x0543, 0x130b: 0x0547, + 0x130c: 0x054b, 0x130d: 0x054f, 0x130e: 0x0553, 0x130f: 0x0557, 0x1310: 0x055b, 0x1311: 0x055f, + 0x1312: 0x0563, 0x1313: 0x0567, 0x1314: 0x056f, 0x1315: 0x0577, 0x1316: 0x057f, 0x1317: 0x0583, + 0x1318: 0x0587, 0x1319: 0x058b, 0x131a: 0x058f, 0x131b: 0x0593, 0x131c: 0x0597, 0x131d: 0x05a7, + 0x131e: 0x4a78, 0x131f: 0x4a7e, 0x1320: 0x03c3, 0x1321: 0x0313, 0x1322: 0x0317, 0x1323: 0x4a3b, + 0x1324: 0x031b, 0x1325: 0x4a41, 0x1326: 0x4a47, 0x1327: 0x031f, 0x1328: 0x0323, 0x1329: 0x0327, + 0x132a: 0x4a4d, 0x132b: 0x4a53, 0x132c: 0x4a59, 0x132d: 0x4a5f, 0x132e: 0x4a65, 0x132f: 0x4a6b, + 0x1330: 0x0367, 0x1331: 0x032b, 0x1332: 0x032f, 0x1333: 0x0333, 0x1334: 0x037b, 0x1335: 0x0337, + 0x1336: 0x033b, 0x1337: 0x033f, 0x1338: 0x0343, 0x1339: 0x0347, 0x133a: 0x034b, 0x133b: 0x034f, + 0x133c: 0x0353, 0x133d: 0x0357, 0x133e: 0x035b, + // Block 0x4d, offset 0x1340 + 0x1342: 0x49bd, 0x1343: 0x49c3, 0x1344: 0x49c9, 0x1345: 0x49cf, + 0x1346: 0x49d5, 0x1347: 0x49db, 0x134a: 0x49e1, 0x134b: 0x49e7, + 0x134c: 0x49ed, 0x134d: 0x49f3, 0x134e: 0x49f9, 0x134f: 0x49ff, + 0x1352: 0x4a05, 0x1353: 0x4a0b, 0x1354: 0x4a11, 0x1355: 0x4a17, 0x1356: 0x4a1d, 0x1357: 0x4a23, + 0x135a: 0x4a29, 0x135b: 0x4a2f, 0x135c: 0x4a35, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x4264, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x0447, 0x1368: 0x046b, 0x1369: 0x044b, + 0x136a: 
0x044f, 0x136b: 0x0453, 0x136c: 0x0457, 0x136d: 0x046f, 0x136e: 0x0473, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d, + 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085, + 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091, + 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d, + 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9, + 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5, + 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0173, 0x13a9: 0x0176, + 0x13aa: 0x0179, 0x13ab: 0x017c, 0x13ac: 0x017f, 0x13ad: 0x0182, 0x13ae: 0x0185, 0x13af: 0x0188, + 0x13b0: 0x018b, 0x13b1: 0x018e, 0x13b2: 0x0191, 0x13b3: 0x0194, 0x13b4: 0x0197, 0x13b5: 0x019a, + 0x13b6: 0x019d, 0x13b7: 0x01a0, 0x13b8: 0x01a3, 0x13b9: 0x0188, 0x13ba: 0x01a6, 0x13bb: 0x01a9, + 0x13bc: 0x01ac, 0x13bd: 0x01af, 0x13be: 0x01b2, 0x13bf: 0x01b5, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x01fd, 0x13c1: 0x0200, 0x13c2: 0x0203, 0x13c3: 0x045b, 0x13c4: 0x01c7, 0x13c5: 0x01d0, + 0x13c6: 0x01d6, 0x13c7: 0x01fa, 0x13c8: 0x01eb, 0x13c9: 0x01e8, 0x13ca: 0x0206, 0x13cb: 0x0209, + 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027, + 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033, + 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b, + 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023, + 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f, + 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027, + 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033, + 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b, + 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033, + // Block 0x50, offset 0x1400 + 0x1400: 0x0239, 0x1401: 0x023c, 0x1402: 0x0248, 0x1403: 0x0251, 0x1405: 0x028a, + 0x1406: 0x025a, 0x1407: 0x024b, 0x1408: 0x0269, 0x1409: 0x0290, 0x140a: 0x027b, 0x140b: 0x027e, + 0x140c: 0x0281, 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1410: 0x0275, 0x1411: 0x0263, + 0x1412: 0x0278, 0x1413: 0x0257, 0x1414: 0x0260, 0x1415: 0x0242, 0x1416: 0x0245, 0x1417: 0x024e, + 0x1418: 0x0254, 0x1419: 0x0266, 0x141a: 0x026c, 0x141b: 0x0272, 0x141c: 0x0293, 0x141d: 0x02e4, + 0x141e: 0x02cc, 0x141f: 0x0296, 0x1421: 0x023c, 0x1422: 0x0248, + 0x1424: 0x0287, 0x1427: 0x024b, 0x1429: 0x0290, + 0x142a: 0x027b, 0x142b: 0x027e, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f, + 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1434: 0x0260, 0x1435: 0x0242, + 0x1436: 0x0245, 0x1437: 0x024e, 0x1439: 0x0266, 0x143b: 0x0272, + // Block 0x51, offset 0x1440 + 0x1442: 0x0248, + 0x1447: 0x024b, 0x1449: 0x0290, 0x144b: 0x027e, + 0x144d: 0x0284, 0x144e: 0x025d, 0x144f: 0x026f, 0x1451: 0x0263, + 0x1452: 0x0278, 0x1454: 0x0260, 0x1457: 0x024e, + 0x1459: 0x0266, 0x145b: 0x0272, 0x145d: 0x02e4, + 0x145f: 0x0296, 0x1461: 0x023c, 0x1462: 0x0248, + 0x1464: 0x0287, 0x1467: 0x024b, 0x1468: 0x0269, 0x1469: 0x0290, + 0x146a: 0x027b, 0x146c: 0x0281, 0x146d: 0x0284, 0x146e: 0x025d, 0x146f: 0x026f, + 0x1470: 0x0275, 0x1471: 0x0263, 0x1472: 0x0278, 0x1474: 
0x0260, 0x1475: 0x0242, + 0x1476: 0x0245, 0x1477: 0x024e, 0x1479: 0x0266, 0x147a: 0x026c, 0x147b: 0x0272, + 0x147c: 0x0293, 0x147e: 0x02cc, + // Block 0x52, offset 0x1480 + 0x1480: 0x0239, 0x1481: 0x023c, 0x1482: 0x0248, 0x1483: 0x0251, 0x1484: 0x0287, 0x1485: 0x028a, + 0x1486: 0x025a, 0x1487: 0x024b, 0x1488: 0x0269, 0x1489: 0x0290, 0x148b: 0x027e, + 0x148c: 0x0281, 0x148d: 0x0284, 0x148e: 0x025d, 0x148f: 0x026f, 0x1490: 0x0275, 0x1491: 0x0263, + 0x1492: 0x0278, 0x1493: 0x0257, 0x1494: 0x0260, 0x1495: 0x0242, 0x1496: 0x0245, 0x1497: 0x024e, + 0x1498: 0x0254, 0x1499: 0x0266, 0x149a: 0x026c, 0x149b: 0x0272, + 0x14a1: 0x023c, 0x14a2: 0x0248, 0x14a3: 0x0251, + 0x14a5: 0x028a, 0x14a6: 0x025a, 0x14a7: 0x024b, 0x14a8: 0x0269, 0x14a9: 0x0290, + 0x14ab: 0x027e, 0x14ac: 0x0281, 0x14ad: 0x0284, 0x14ae: 0x025d, 0x14af: 0x026f, + 0x14b0: 0x0275, 0x14b1: 0x0263, 0x14b2: 0x0278, 0x14b3: 0x0257, 0x14b4: 0x0260, 0x14b5: 0x0242, + 0x14b6: 0x0245, 0x14b7: 0x024e, 0x14b8: 0x0254, 0x14b9: 0x0266, 0x14ba: 0x026c, 0x14bb: 0x0272, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x1879, 0x14c1: 0x1876, 0x14c2: 0x187c, 0x14c3: 0x18a0, 0x14c4: 0x18c4, 0x14c5: 0x18e8, + 0x14c6: 0x190c, 0x14c7: 0x1915, 0x14c8: 0x191b, 0x14c9: 0x1921, 0x14ca: 0x1927, + 0x14d0: 0x1a8c, 0x14d1: 0x1a90, + 0x14d2: 0x1a94, 0x14d3: 0x1a98, 0x14d4: 0x1a9c, 0x14d5: 0x1aa0, 0x14d6: 0x1aa4, 0x14d7: 0x1aa8, + 0x14d8: 0x1aac, 0x14d9: 0x1ab0, 0x14da: 0x1ab4, 0x14db: 0x1ab8, 0x14dc: 0x1abc, 0x14dd: 0x1ac0, + 0x14de: 0x1ac4, 0x14df: 0x1ac8, 0x14e0: 0x1acc, 0x14e1: 0x1ad0, 0x14e2: 0x1ad4, 0x14e3: 0x1ad8, + 0x14e4: 0x1adc, 0x14e5: 0x1ae0, 0x14e6: 0x1ae4, 0x14e7: 0x1ae8, 0x14e8: 0x1aec, 0x14e9: 0x1af0, + 0x14ea: 0x271e, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193c, 0x14ee: 0x19b1, + 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d, + 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059, + 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061, + // Block 0x54, offset 0x1500 + 0x1500: 0x26ad, 0x1501: 0x26c2, 0x1502: 0x0503, + 0x1510: 0x0c0f, 0x1511: 0x0a47, + 0x1512: 0x08d3, 0x1513: 0x45c4, 0x1514: 0x071b, 0x1515: 0x09ef, 0x1516: 0x132f, 0x1517: 0x09ff, + 0x1518: 0x0727, 0x1519: 0x0cd7, 0x151a: 0x0eaf, 0x151b: 0x0caf, 0x151c: 0x0827, 0x151d: 0x0b6b, + 0x151e: 0x07bf, 0x151f: 0x0cb7, 0x1520: 0x0813, 0x1521: 0x1117, 0x1522: 0x0f83, 0x1523: 0x138b, + 0x1524: 0x09d3, 0x1525: 0x090b, 0x1526: 0x0e63, 0x1527: 0x0c1b, 0x1528: 0x0c47, 0x1529: 0x06bf, + 0x152a: 0x06cb, 0x152b: 0x140b, 0x152c: 0x0adb, 0x152d: 0x06e7, 0x152e: 0x08ef, 0x152f: 0x0c3b, + 0x1530: 0x13b3, 0x1531: 0x0c13, 0x1532: 0x106f, 0x1533: 0x10ab, 0x1534: 0x08f7, 0x1535: 0x0e43, + 0x1536: 0x0d0b, 0x1537: 0x0d07, 0x1538: 0x0f97, 0x1539: 0x082b, 0x153a: 0x0957, 0x153b: 0x1443, + // Block 0x55, offset 0x1540 + 0x1540: 0x06fb, 0x1541: 0x06f3, 0x1542: 0x0703, 0x1543: 0x1647, 0x1544: 0x0747, 0x1545: 0x0757, + 0x1546: 0x075b, 0x1547: 0x0763, 0x1548: 0x076b, 0x1549: 0x076f, 0x154a: 0x077b, 0x154b: 0x0773, + 0x154c: 0x05b3, 0x154d: 0x165b, 0x154e: 0x078f, 0x154f: 0x0793, 0x1550: 0x0797, 0x1551: 0x07b3, + 0x1552: 0x164c, 0x1553: 0x05b7, 0x1554: 0x079f, 0x1555: 0x07bf, 0x1556: 0x1656, 0x1557: 0x07cf, + 0x1558: 0x07d7, 0x1559: 0x0737, 0x155a: 0x07df, 0x155b: 0x07e3, 0x155c: 0x1831, 0x155d: 0x07ff, + 0x155e: 0x0807, 0x155f: 0x05bf, 0x1560: 0x081f, 0x1561: 0x0823, 0x1562: 0x082b, 0x1563: 0x082f, + 0x1564: 0x05c3, 0x1565: 0x0847, 0x1566: 0x084b, 0x1567: 0x0857, 0x1568: 0x0863, 0x1569: 0x0867, + 0x156a: 0x086b, 0x156b: 
0x0873, 0x156c: 0x0893, 0x156d: 0x0897, 0x156e: 0x089f, 0x156f: 0x08af, + 0x1570: 0x08b7, 0x1571: 0x08bb, 0x1572: 0x08bb, 0x1573: 0x08bb, 0x1574: 0x166a, 0x1575: 0x0e93, + 0x1576: 0x08cf, 0x1577: 0x08d7, 0x1578: 0x166f, 0x1579: 0x08e3, 0x157a: 0x08eb, 0x157b: 0x08f3, + 0x157c: 0x091b, 0x157d: 0x0907, 0x157e: 0x0913, 0x157f: 0x0917, + // Block 0x56, offset 0x1580 + 0x1580: 0x091f, 0x1581: 0x0927, 0x1582: 0x092b, 0x1583: 0x0933, 0x1584: 0x093b, 0x1585: 0x093f, + 0x1586: 0x093f, 0x1587: 0x0947, 0x1588: 0x094f, 0x1589: 0x0953, 0x158a: 0x095f, 0x158b: 0x0983, + 0x158c: 0x0967, 0x158d: 0x0987, 0x158e: 0x096b, 0x158f: 0x0973, 0x1590: 0x080b, 0x1591: 0x09cf, + 0x1592: 0x0997, 0x1593: 0x099b, 0x1594: 0x099f, 0x1595: 0x0993, 0x1596: 0x09a7, 0x1597: 0x09a3, + 0x1598: 0x09bb, 0x1599: 0x1674, 0x159a: 0x09d7, 0x159b: 0x09db, 0x159c: 0x09e3, 0x159d: 0x09ef, + 0x159e: 0x09f7, 0x159f: 0x0a13, 0x15a0: 0x1679, 0x15a1: 0x167e, 0x15a2: 0x0a1f, 0x15a3: 0x0a23, + 0x15a4: 0x0a27, 0x15a5: 0x0a1b, 0x15a6: 0x0a2f, 0x15a7: 0x05c7, 0x15a8: 0x05cb, 0x15a9: 0x0a37, + 0x15aa: 0x0a3f, 0x15ab: 0x0a3f, 0x15ac: 0x1683, 0x15ad: 0x0a5b, 0x15ae: 0x0a5f, 0x15af: 0x0a63, + 0x15b0: 0x0a6b, 0x15b1: 0x1688, 0x15b2: 0x0a73, 0x15b3: 0x0a77, 0x15b4: 0x0b4f, 0x15b5: 0x0a7f, + 0x15b6: 0x05cf, 0x15b7: 0x0a8b, 0x15b8: 0x0a9b, 0x15b9: 0x0aa7, 0x15ba: 0x0aa3, 0x15bb: 0x1692, + 0x15bc: 0x0aaf, 0x15bd: 0x1697, 0x15be: 0x0abb, 0x15bf: 0x0ab7, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0abf, 0x15c1: 0x0acf, 0x15c2: 0x0ad3, 0x15c3: 0x05d3, 0x15c4: 0x0ae3, 0x15c5: 0x0aeb, + 0x15c6: 0x0aef, 0x15c7: 0x0af3, 0x15c8: 0x05d7, 0x15c9: 0x169c, 0x15ca: 0x05db, 0x15cb: 0x0b0f, + 0x15cc: 0x0b13, 0x15cd: 0x0b17, 0x15ce: 0x0b1f, 0x15cf: 0x1863, 0x15d0: 0x0b37, 0x15d1: 0x16a6, + 0x15d2: 0x16a6, 0x15d3: 0x11d7, 0x15d4: 0x0b47, 0x15d5: 0x0b47, 0x15d6: 0x05df, 0x15d7: 0x16c9, + 0x15d8: 0x179b, 0x15d9: 0x0b57, 0x15da: 0x0b5f, 0x15db: 0x05e3, 0x15dc: 0x0b73, 0x15dd: 0x0b83, + 0x15de: 0x0b87, 0x15df: 0x0b8f, 0x15e0: 0x0b9f, 0x15e1: 0x05eb, 0x15e2: 0x05e7, 0x15e3: 0x0ba3, + 0x15e4: 0x16ab, 0x15e5: 0x0ba7, 0x15e6: 0x0bbb, 0x15e7: 0x0bbf, 0x15e8: 0x0bc3, 0x15e9: 0x0bbf, + 0x15ea: 0x0bcf, 0x15eb: 0x0bd3, 0x15ec: 0x0be3, 0x15ed: 0x0bdb, 0x15ee: 0x0bdf, 0x15ef: 0x0be7, + 0x15f0: 0x0beb, 0x15f1: 0x0bef, 0x15f2: 0x0bfb, 0x15f3: 0x0bff, 0x15f4: 0x0c17, 0x15f5: 0x0c1f, + 0x15f6: 0x0c2f, 0x15f7: 0x0c43, 0x15f8: 0x16ba, 0x15f9: 0x0c3f, 0x15fa: 0x0c33, 0x15fb: 0x0c4b, + 0x15fc: 0x0c53, 0x15fd: 0x0c67, 0x15fe: 0x16bf, 0x15ff: 0x0c6f, + // Block 0x58, offset 0x1600 + 0x1600: 0x0c63, 0x1601: 0x0c5b, 0x1602: 0x05ef, 0x1603: 0x0c77, 0x1604: 0x0c7f, 0x1605: 0x0c87, + 0x1606: 0x0c7b, 0x1607: 0x05f3, 0x1608: 0x0c97, 0x1609: 0x0c9f, 0x160a: 0x16c4, 0x160b: 0x0ccb, + 0x160c: 0x0cff, 0x160d: 0x0cdb, 0x160e: 0x05ff, 0x160f: 0x0ce7, 0x1610: 0x05fb, 0x1611: 0x05f7, + 0x1612: 0x07c3, 0x1613: 0x07c7, 0x1614: 0x0d03, 0x1615: 0x0ceb, 0x1616: 0x11ab, 0x1617: 0x0663, + 0x1618: 0x0d0f, 0x1619: 0x0d13, 0x161a: 0x0d17, 0x161b: 0x0d2b, 0x161c: 0x0d23, 0x161d: 0x16dd, + 0x161e: 0x0603, 0x161f: 0x0d3f, 0x1620: 0x0d33, 0x1621: 0x0d4f, 0x1622: 0x0d57, 0x1623: 0x16e7, + 0x1624: 0x0d5b, 0x1625: 0x0d47, 0x1626: 0x0d63, 0x1627: 0x0607, 0x1628: 0x0d67, 0x1629: 0x0d6b, + 0x162a: 0x0d6f, 0x162b: 0x0d7b, 0x162c: 0x16ec, 0x162d: 0x0d83, 0x162e: 0x060b, 0x162f: 0x0d8f, + 0x1630: 0x16f1, 0x1631: 0x0d93, 0x1632: 0x060f, 0x1633: 0x0d9f, 0x1634: 0x0dab, 0x1635: 0x0db7, + 0x1636: 0x0dbb, 0x1637: 0x16f6, 0x1638: 0x168d, 0x1639: 0x16fb, 0x163a: 0x0ddb, 0x163b: 0x1700, + 0x163c: 0x0de7, 0x163d: 0x0def, 0x163e: 0x0ddf, 
0x163f: 0x0dfb, + // Block 0x59, offset 0x1640 + 0x1640: 0x0e0b, 0x1641: 0x0e1b, 0x1642: 0x0e0f, 0x1643: 0x0e13, 0x1644: 0x0e1f, 0x1645: 0x0e23, + 0x1646: 0x1705, 0x1647: 0x0e07, 0x1648: 0x0e3b, 0x1649: 0x0e3f, 0x164a: 0x0613, 0x164b: 0x0e53, + 0x164c: 0x0e4f, 0x164d: 0x170a, 0x164e: 0x0e33, 0x164f: 0x0e6f, 0x1650: 0x170f, 0x1651: 0x1714, + 0x1652: 0x0e73, 0x1653: 0x0e87, 0x1654: 0x0e83, 0x1655: 0x0e7f, 0x1656: 0x0617, 0x1657: 0x0e8b, + 0x1658: 0x0e9b, 0x1659: 0x0e97, 0x165a: 0x0ea3, 0x165b: 0x1651, 0x165c: 0x0eb3, 0x165d: 0x1719, + 0x165e: 0x0ebf, 0x165f: 0x1723, 0x1660: 0x0ed3, 0x1661: 0x0edf, 0x1662: 0x0ef3, 0x1663: 0x1728, + 0x1664: 0x0f07, 0x1665: 0x0f0b, 0x1666: 0x172d, 0x1667: 0x1732, 0x1668: 0x0f27, 0x1669: 0x0f37, + 0x166a: 0x061b, 0x166b: 0x0f3b, 0x166c: 0x061f, 0x166d: 0x061f, 0x166e: 0x0f53, 0x166f: 0x0f57, + 0x1670: 0x0f5f, 0x1671: 0x0f63, 0x1672: 0x0f6f, 0x1673: 0x0623, 0x1674: 0x0f87, 0x1675: 0x1737, + 0x1676: 0x0fa3, 0x1677: 0x173c, 0x1678: 0x0faf, 0x1679: 0x16a1, 0x167a: 0x0fbf, 0x167b: 0x1741, + 0x167c: 0x1746, 0x167d: 0x174b, 0x167e: 0x0627, 0x167f: 0x062b, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0ff7, 0x1681: 0x1755, 0x1682: 0x1750, 0x1683: 0x175a, 0x1684: 0x175f, 0x1685: 0x0fff, + 0x1686: 0x1003, 0x1687: 0x1003, 0x1688: 0x100b, 0x1689: 0x0633, 0x168a: 0x100f, 0x168b: 0x0637, + 0x168c: 0x063b, 0x168d: 0x1769, 0x168e: 0x1023, 0x168f: 0x102b, 0x1690: 0x1037, 0x1691: 0x063f, + 0x1692: 0x176e, 0x1693: 0x105b, 0x1694: 0x1773, 0x1695: 0x1778, 0x1696: 0x107b, 0x1697: 0x1093, + 0x1698: 0x0643, 0x1699: 0x109b, 0x169a: 0x109f, 0x169b: 0x10a3, 0x169c: 0x177d, 0x169d: 0x1782, + 0x169e: 0x1782, 0x169f: 0x10bb, 0x16a0: 0x0647, 0x16a1: 0x1787, 0x16a2: 0x10cf, 0x16a3: 0x10d3, + 0x16a4: 0x064b, 0x16a5: 0x178c, 0x16a6: 0x10ef, 0x16a7: 0x064f, 0x16a8: 0x10ff, 0x16a9: 0x10f7, + 0x16aa: 0x1107, 0x16ab: 0x1796, 0x16ac: 0x111f, 0x16ad: 0x0653, 0x16ae: 0x112b, 0x16af: 0x1133, + 0x16b0: 0x1143, 0x16b1: 0x0657, 0x16b2: 0x17a0, 0x16b3: 0x17a5, 0x16b4: 0x065b, 0x16b5: 0x17aa, + 0x16b6: 0x115b, 0x16b7: 0x17af, 0x16b8: 0x1167, 0x16b9: 0x1173, 0x16ba: 0x117b, 0x16bb: 0x17b4, + 0x16bc: 0x17b9, 0x16bd: 0x118f, 0x16be: 0x17be, 0x16bf: 0x1197, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x16ce, 0x16c1: 0x065f, 0x16c2: 0x11af, 0x16c3: 0x11b3, 0x16c4: 0x0667, 0x16c5: 0x11b7, + 0x16c6: 0x0a33, 0x16c7: 0x17c3, 0x16c8: 0x17c8, 0x16c9: 0x16d3, 0x16ca: 0x16d8, 0x16cb: 0x11d7, + 0x16cc: 0x11db, 0x16cd: 0x13f3, 0x16ce: 0x066b, 0x16cf: 0x1207, 0x16d0: 0x1203, 0x16d1: 0x120b, + 0x16d2: 0x083f, 0x16d3: 0x120f, 0x16d4: 0x1213, 0x16d5: 0x1217, 0x16d6: 0x121f, 0x16d7: 0x17cd, + 0x16d8: 0x121b, 0x16d9: 0x1223, 0x16da: 0x1237, 0x16db: 0x123b, 0x16dc: 0x1227, 0x16dd: 0x123f, + 0x16de: 0x1253, 0x16df: 0x1267, 0x16e0: 0x1233, 0x16e1: 0x1247, 0x16e2: 0x124b, 0x16e3: 0x124f, + 0x16e4: 0x17d2, 0x16e5: 0x17dc, 0x16e6: 0x17d7, 0x16e7: 0x066f, 0x16e8: 0x126f, 0x16e9: 0x1273, + 0x16ea: 0x127b, 0x16eb: 0x17f0, 0x16ec: 0x127f, 0x16ed: 0x17e1, 0x16ee: 0x0673, 0x16ef: 0x0677, + 0x16f0: 0x17e6, 0x16f1: 0x17eb, 0x16f2: 0x067b, 0x16f3: 0x129f, 0x16f4: 0x12a3, 0x16f5: 0x12a7, + 0x16f6: 0x12ab, 0x16f7: 0x12b7, 0x16f8: 0x12b3, 0x16f9: 0x12bf, 0x16fa: 0x12bb, 0x16fb: 0x12cb, + 0x16fc: 0x12c3, 0x16fd: 0x12c7, 0x16fe: 0x12cf, 0x16ff: 0x067f, + // Block 0x5c, offset 0x1700 + 0x1700: 0x12d7, 0x1701: 0x12db, 0x1702: 0x0683, 0x1703: 0x12eb, 0x1704: 0x12ef, 0x1705: 0x17f5, + 0x1706: 0x12fb, 0x1707: 0x12ff, 0x1708: 0x0687, 0x1709: 0x130b, 0x170a: 0x05bb, 0x170b: 0x17fa, + 0x170c: 0x17ff, 0x170d: 0x068b, 0x170e: 0x068f, 0x170f: 0x1337, 0x1710: 
0x134f, 0x1711: 0x136b, + 0x1712: 0x137b, 0x1713: 0x1804, 0x1714: 0x138f, 0x1715: 0x1393, 0x1716: 0x13ab, 0x1717: 0x13b7, + 0x1718: 0x180e, 0x1719: 0x1660, 0x171a: 0x13c3, 0x171b: 0x13bf, 0x171c: 0x13cb, 0x171d: 0x1665, + 0x171e: 0x13d7, 0x171f: 0x13e3, 0x1720: 0x1813, 0x1721: 0x1818, 0x1722: 0x1423, 0x1723: 0x142f, + 0x1724: 0x1437, 0x1725: 0x181d, 0x1726: 0x143b, 0x1727: 0x1467, 0x1728: 0x1473, 0x1729: 0x1477, + 0x172a: 0x146f, 0x172b: 0x1483, 0x172c: 0x1487, 0x172d: 0x1822, 0x172e: 0x1493, 0x172f: 0x0693, + 0x1730: 0x149b, 0x1731: 0x1827, 0x1732: 0x0697, 0x1733: 0x14d3, 0x1734: 0x0ac3, 0x1735: 0x14eb, + 0x1736: 0x182c, 0x1737: 0x1836, 0x1738: 0x069b, 0x1739: 0x069f, 0x173a: 0x1513, 0x173b: 0x183b, + 0x173c: 0x06a3, 0x173d: 0x1840, 0x173e: 0x152b, 0x173f: 0x152b, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1533, 0x1741: 0x1845, 0x1742: 0x154b, 0x1743: 0x06a7, 0x1744: 0x155b, 0x1745: 0x1567, + 0x1746: 0x156f, 0x1747: 0x1577, 0x1748: 0x06ab, 0x1749: 0x184a, 0x174a: 0x158b, 0x174b: 0x15a7, + 0x174c: 0x15b3, 0x174d: 0x06af, 0x174e: 0x06b3, 0x174f: 0x15b7, 0x1750: 0x184f, 0x1751: 0x06b7, + 0x1752: 0x1854, 0x1753: 0x1859, 0x1754: 0x185e, 0x1755: 0x15db, 0x1756: 0x06bb, 0x1757: 0x15ef, + 0x1758: 0x15f7, 0x1759: 0x15fb, 0x175a: 0x1603, 0x175b: 0x160b, 0x175c: 0x1613, 0x175d: 0x1868, +} + +// nfkcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62, + 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16c: 0x0f, 0x16d: 0x92, 0x16e: 0x93, 0x16f: 0x94, + 0x170: 0x95, 0x173: 0x96, 0x174: 0x97, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9c, 0x187: 0x9d, + 0x188: 0x9e, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0x9f, 0x18c: 0xa0, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa1, + 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4, + 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8, + 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xab, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xac, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xad, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xae, 0x21a: 0xaf, 0x21b: 
0xb0, 0x21d: 0xb1, 0x21f: 0xb2, + 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8, + 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc, + 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd, + 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe, + // Block 0x9, offset 0x240 + 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf, + 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0, + 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1, + 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 0xc0, 0x25e: 0xc1, 0x25f: 0xc2, + 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3, + 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd, + 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe, + 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf, + // Block 0xa, offset 0x280 + 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 0xc0, + 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1, + 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2, + 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3, + 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd, + 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe, + 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf, + 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1, + 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2, + 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3, + 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc5, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xc6, + 0x34b: 0xc7, 0x34d: 0xc8, + 0x368: 0xc9, 0x36b: 0xca, + 0x374: 0xcb, + 0x37d: 0xcc, + // Block 0xe, offset 0x380 + 0x381: 0xcd, 0x382: 0xce, 0x384: 0xcf, 0x385: 0xb7, 0x387: 0xd0, + 0x388: 0xd1, 0x38b: 0xd2, 0x38c: 0xd3, 0x38d: 0xd4, + 0x391: 0xd5, 0x392: 0xd6, 0x393: 0xd7, 0x396: 0xd8, 0x397: 0xd9, + 0x398: 0xda, 0x39a: 0xdb, 0x39c: 0xdc, + 0x3a0: 0xdd, + 0x3a8: 0xde, 0x3a9: 0xdf, 0x3aa: 0xe0, + 0x3b0: 0xda, 0x3b5: 0xe1, 0x3b6: 0xe2, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xe3, 0x3ec: 0xe4, + // Block 0x10, offset 0x400 + 0x432: 
0xe5, + // Block 0x11, offset 0x440 + 0x445: 0xe6, 0x446: 0xe7, 0x447: 0xe8, + 0x449: 0xe9, + 0x450: 0xea, 0x451: 0xeb, 0x452: 0xec, 0x453: 0xed, 0x454: 0xee, 0x455: 0xef, 0x456: 0xf0, 0x457: 0xf1, + 0x458: 0xf2, 0x459: 0xf3, 0x45a: 0x4c, 0x45b: 0xf4, 0x45c: 0xf5, 0x45d: 0xf6, 0x45e: 0xf7, 0x45f: 0x4d, + // Block 0x12, offset 0x480 + 0x480: 0xf8, + 0x4a3: 0xf9, 0x4a5: 0xfa, + 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x51, 0x4c5: 0xfb, 0x4c6: 0xfc, + 0x4c8: 0x52, 0x4c9: 0xfd, + // Block 0x14, offset 0x500 + 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a, + 0x528: 0x5b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 162 entries, 324 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xde, 0xe2, 0xe8, 0xf9, 0x105, 0x107, 0x10d, 0x10f, 0x111, 0x113, 0x115, 0x117, 0x119, 0x11b, 0x11e, 0x121, 0x123, 0x126, 0x129, 0x12d, 0x132, 0x13b, 0x13d, 0x140, 0x142, 0x14d, 0x158, 0x166, 0x174, 0x184, 0x192, 0x199, 0x19f, 0x1ae, 0x1b2, 0x1b4, 0x1b8, 0x1ba, 0x1bd, 0x1bf, 0x1c2, 0x1c4, 0x1c7, 0x1c9, 0x1cb, 0x1cd, 0x1d9, 0x1e3, 0x1ed, 0x1f0, 0x1f4, 0x1f6, 0x1f8, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x203, 0x205, 0x207, 0x20d, 0x210, 0x214, 0x216, 0x21d, 0x223, 0x229, 0x231, 0x237, 0x23d, 0x243, 0x247, 0x249, 0x24b, 0x24d, 0x24f, 0x255, 0x258, 0x25a, 0x260, 0x263, 0x26b, 0x272, 0x275, 0x278, 0x27a, 0x27d, 0x285, 0x289, 0x290, 0x293, 0x299, 0x29b, 0x29d, 0x2a0, 0x2a2, 0x2a5, 0x2a7, 0x2a9, 0x2ab, 0x2ae, 0x2b0, 0x2b2, 0x2b4, 0x2b6, 0x2c3, 0x2cd, 0x2cf, 0x2d1, 0x2d5, 0x2da, 0x2e6, 0x2eb, 0x2f4, 0x2fa, 0x2ff, 0x303, 0x308, 0x30c, 0x31c, 0x32a, 0x338, 0x346, 0x34c, 0x34e, 0x351, 0x35b, 0x35d} + +// nfkcSparseValues: 871 entries, 3484 bytes +var nfkcSparseValues = [871]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x4278, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x4264, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x425a, lo: 0xb4, hi: 0xb4}, + {value: 0x01dc, lo: 0xb5, hi: 0xb5}, + {value: 0x4291, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x221c, lo: 0xbc, hi: 0xbc}, + {value: 0x2210, lo: 0xbd, hi: 0xbd}, + {value: 0x22b2, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x426e, lo: 0x98, hi: 0x98}, + {value: 0x4273, lo: 0x99, hi: 0x9a}, + {value: 0x4296, lo: 0x9b, hi: 0x9b}, + {value: 0x425f, lo: 0x9c, hi: 0x9c}, + {value: 0x4282, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + 
{value: 0x0167, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x70 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x75 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x77 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + 
{value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0xf, offset 0x7f + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x10, offset 0x86 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x11, offset 0x89 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x91 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x13, offset 0x95 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x14, offset 0x99 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x16, offset 0x9d + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x17, offset 0xa6 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x18, offset 0xaa + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x19, offset 0xb1 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1a, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0xb9 + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xc3 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xc6 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1e, offset 0xcd + 
{value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2621, lo: 0xb3, hi: 0xb3}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x02}, + {value: 0x2636, lo: 0xb3, hi: 0xb3}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x22, offset 0xde + {value: 0x0000, lo: 0x03}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + {value: 0x2628, lo: 0x9c, hi: 0x9c}, + {value: 0x262f, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe2 + {value: 0x0000, lo: 0x05}, + {value: 0x030b, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x24, offset 0xe8 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x45f4, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x45ff, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xf9 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x105 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x107 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x10f + {value: 0x0000, lo: 0x01}, + {value: 0x030f, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x111 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x115 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x117 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x119 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11e + {value: 0x0000, lo: 0x02}, + {value: 
0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x121 + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x123 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x126 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x129 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12d + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x132 + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x13b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x13d + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x140 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x142 + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x14d + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x158 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043b, lo: 0x91, hi: 0x91}, + {value: 0x429b, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1873, lo: 0xa5, hi: 0xa5}, + {value: 0x1b5c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2691, lo: 0xb3, hi: 0xb3}, + {value: 0x27fe, lo: 0xb4, hi: 0xb4}, + {value: 0x2698, lo: 0xb6, hi: 0xb6}, + {value: 0x2808, lo: 0xb7, hi: 0xb7}, + {value: 0x186d, lo: 0xbc, hi: 0xbc}, + {value: 0x4269, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x166 + {value: 0x0002, lo: 0x0d}, + {value: 0x1933, lo: 0x87, hi: 0x87}, + {value: 0x1930, lo: 0x88, hi: 0x88}, + {value: 0x1870, lo: 0x89, hi: 0x89}, + {value: 0x298e, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x0467, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, 
hi: 0xbf}, + // Block 0x3e, offset 0x174 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x0467, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1999, lo: 0xa8, hi: 0xa8}, + // Block 0x3f, offset 0x184 + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x40, offset 0x192 + {value: 0x0007, lo: 0x06}, + {value: 0x2180, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x199 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x19f + {value: 0x0173, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0x269f, lo: 0xac, hi: 0xad}, + {value: 0x26a6, lo: 0xaf, hi: 0xaf}, + {value: 0x281c, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x1ae + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x1b2 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x1b4 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x46, offset 0x1b8 + {value: 0x0000, lo: 0x01}, + {value: 0x299b, lo: 0x8c, hi: 0x8c}, + // Block 0x47, offset 0x1ba + {value: 0x0263, lo: 0x02}, + {value: 0x1b8c, lo: 0xb4, hi: 0xb4}, + {value: 0x192d, lo: 0xb5, hi: 0xb6}, + // Block 0x48, offset 0x1bd + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x49, offset 0x1bf + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4a, offset 0x1c2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x4b, offset 0x1c4 + {value: 0x0000, lo: 0x02}, + {value: 0x047f, lo: 0xaf, hi: 0xaf}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x4c, offset 
0x1c7 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x4d, offset 0x1c9 + {value: 0x0000, lo: 0x01}, + {value: 0x0dc3, lo: 0x9f, hi: 0x9f}, + // Block 0x4e, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x162f, lo: 0xb3, hi: 0xb3}, + // Block 0x4f, offset 0x1cd + {value: 0x0004, lo: 0x0b}, + {value: 0x1597, lo: 0x80, hi: 0x82}, + {value: 0x15af, lo: 0x83, hi: 0x83}, + {value: 0x15c7, lo: 0x84, hi: 0x85}, + {value: 0x15d7, lo: 0x86, hi: 0x89}, + {value: 0x15eb, lo: 0x8a, hi: 0x8c}, + {value: 0x15ff, lo: 0x8d, hi: 0x8d}, + {value: 0x1607, lo: 0x8e, hi: 0x8e}, + {value: 0x160f, lo: 0x8f, hi: 0x90}, + {value: 0x161b, lo: 0x91, hi: 0x93}, + {value: 0x162b, lo: 0x94, hi: 0x94}, + {value: 0x1633, lo: 0x95, hi: 0x95}, + // Block 0x50, offset 0x1d9 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xae}, + {value: 0x812f, lo: 0xaf, hi: 0xaf}, + {value: 0x04b3, lo: 0xb6, hi: 0xb6}, + {value: 0x0887, lo: 0xb8, hi: 0xba}, + // Block 0x51, offset 0x1e3 + {value: 0x0006, lo: 0x09}, + {value: 0x0313, lo: 0xb1, hi: 0xb1}, + {value: 0x0317, lo: 0xb2, hi: 0xb2}, + {value: 0x4a3b, lo: 0xb3, hi: 0xb3}, + {value: 0x031b, lo: 0xb4, hi: 0xb4}, + {value: 0x4a41, lo: 0xb5, hi: 0xb6}, + {value: 0x031f, lo: 0xb7, hi: 0xb7}, + {value: 0x0323, lo: 0xb8, hi: 0xb8}, + {value: 0x0327, lo: 0xb9, hi: 0xb9}, + {value: 0x4a4d, lo: 0xba, hi: 0xbf}, + // Block 0x52, offset 0x1ed + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x53, offset 0x1f0 + {value: 0x0000, lo: 0x03}, + {value: 0x020f, lo: 0x9c, hi: 0x9c}, + {value: 0x0212, lo: 0x9d, hi: 0x9d}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x54, offset 0x1f4 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x55, offset 0x1f6 + {value: 0x0000, lo: 0x01}, + {value: 0x163b, lo: 0xb0, hi: 0xb0}, + // Block 0x56, offset 0x1f8 + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x57, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x58, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x59, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x5a, offset 0x201 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x5b, offset 0x203 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x5c, offset 0x205 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x5d, offset 0x207 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x20d + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x5f, offset 0x210 + {value: 0x0008, lo: 0x03}, + {value: 0x1637, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1643, lo: 0x9f, hi: 0x9f}, + // Block 0x60, offset 0x214 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x61, offset 0x216 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, 
lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x62, offset 0x21d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x63, offset 0x223 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x64, offset 0x229 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x65, offset 0x231 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x66, offset 0x237 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x67, offset 0x23d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x68, offset 0x243 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x69, offset 0x247 + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6a, offset 0x249 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6b, offset 0x24b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x6c, offset 0x24d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x6d, offset 0x24f + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, offset 0x255 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x6f, offset 0x258 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x70, offset 0x25a + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x71, offset 0x260 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x263 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 
0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x73, offset 0x26b + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x74, offset 0x272 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x75, offset 0x275 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x76, offset 0x278 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x77, offset 0x27a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x78, offset 0x27d + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x79, offset 0x285 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7a, offset 0x289 + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7b, offset 0x290 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7c, offset 0x293 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7d, offset 0x299 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7e, offset 0x29b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x29d + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x80, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x81, offset 0x2a2 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x82, offset 0x2a5 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x83, offset 0x2a7 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x84, offset 0x2a9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x85, offset 0x2ab + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x86, offset 0x2ae + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x87, offset 0x2b0 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x88, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x89, offset 0x2b4 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8a, offset 0x2b6 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 
0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8b, offset 0x2c3 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x8c, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x8d, offset 0x2cf + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8e, offset 0x2d1 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x8f, offset 0x2d5 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x90, offset 0x2da + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x91, offset 0x2e6 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x92, offset 0x2eb + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x93, offset 0x2f4 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x94, offset 0x2fa + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x95, offset 0x2ff + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x96, offset 0x303 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x97, offset 0x308 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x30c + {value: 
0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x99, offset 0x31c + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x32a + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x9b, offset 0x338 + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x9c, offset 0x346 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x9d, offset 0x34c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x9e, offset 0x34e + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x9f, offset 0x351 + {value: 0x0002, lo: 0x09}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1981, lo: 0x8b, hi: 0x8b}, + {value: 0x199c, lo: 0x8c, hi: 0x8c}, + {value: 0x19a2, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc0, lo: 0x8e, hi: 0x8e}, + {value: 0x19ae, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + // Block 0xa0, offset 0x35b + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0xa1, offset 0x35d + {value: 0x0028, lo: 0x09}, + {value: 0x2862, lo: 0x80, hi: 0x80}, + {value: 0x2826, lo: 0x81, hi: 0x81}, + {value: 0x2830, lo: 0x82, hi: 0x82}, + {value: 0x2844, lo: 0x83, hi: 0x84}, + {value: 0x284e, lo: 0x85, hi: 0x86}, + 
{value: 0x283a, lo: 0x87, hi: 0x87}, + {value: 0x2858, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + 
"\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 53KB (54514 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index a01274a8..94290692 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -4,6 +4,8 @@ package norm +import "sync" + const ( // Version is the Unicode edition from which the tables are derived. 
Version = "9.0.0" @@ -6687,947 +6689,949 @@ var nfkcSparseValues = [875]valueRange{ } // recompMap: 7520 bytes (entries only) -var recompMap = map[uint32]rune{ - 0x00410300: 0x00C0, - 0x00410301: 0x00C1, - 0x00410302: 0x00C2, - 0x00410303: 0x00C3, - 0x00410308: 0x00C4, - 0x0041030A: 0x00C5, - 0x00430327: 0x00C7, - 0x00450300: 0x00C8, - 0x00450301: 0x00C9, - 0x00450302: 0x00CA, - 0x00450308: 0x00CB, - 0x00490300: 0x00CC, - 0x00490301: 0x00CD, - 0x00490302: 0x00CE, - 0x00490308: 0x00CF, - 0x004E0303: 0x00D1, - 0x004F0300: 0x00D2, - 0x004F0301: 0x00D3, - 0x004F0302: 0x00D4, - 0x004F0303: 0x00D5, - 0x004F0308: 0x00D6, - 0x00550300: 0x00D9, - 0x00550301: 0x00DA, - 0x00550302: 0x00DB, - 0x00550308: 0x00DC, - 0x00590301: 0x00DD, - 0x00610300: 0x00E0, - 0x00610301: 0x00E1, - 0x00610302: 0x00E2, - 0x00610303: 0x00E3, - 0x00610308: 0x00E4, - 0x0061030A: 0x00E5, - 0x00630327: 0x00E7, - 0x00650300: 0x00E8, - 0x00650301: 0x00E9, - 0x00650302: 0x00EA, - 0x00650308: 0x00EB, - 0x00690300: 0x00EC, - 0x00690301: 0x00ED, - 0x00690302: 0x00EE, - 0x00690308: 0x00EF, - 0x006E0303: 0x00F1, - 0x006F0300: 0x00F2, - 0x006F0301: 0x00F3, - 0x006F0302: 0x00F4, - 0x006F0303: 0x00F5, - 0x006F0308: 0x00F6, - 0x00750300: 0x00F9, - 0x00750301: 0x00FA, - 0x00750302: 0x00FB, - 0x00750308: 0x00FC, - 0x00790301: 0x00FD, - 0x00790308: 0x00FF, - 0x00410304: 0x0100, - 0x00610304: 0x0101, - 0x00410306: 0x0102, - 0x00610306: 0x0103, - 0x00410328: 0x0104, - 0x00610328: 0x0105, - 0x00430301: 0x0106, - 0x00630301: 0x0107, - 0x00430302: 0x0108, - 0x00630302: 0x0109, - 0x00430307: 0x010A, - 0x00630307: 0x010B, - 0x0043030C: 0x010C, - 0x0063030C: 0x010D, - 0x0044030C: 0x010E, - 0x0064030C: 0x010F, - 0x00450304: 0x0112, - 0x00650304: 0x0113, - 0x00450306: 0x0114, - 0x00650306: 0x0115, - 0x00450307: 0x0116, - 0x00650307: 0x0117, - 0x00450328: 0x0118, - 0x00650328: 0x0119, - 0x0045030C: 0x011A, - 0x0065030C: 0x011B, - 0x00470302: 0x011C, - 0x00670302: 0x011D, - 0x00470306: 0x011E, - 0x00670306: 0x011F, - 0x00470307: 0x0120, - 0x00670307: 0x0121, - 0x00470327: 0x0122, - 0x00670327: 0x0123, - 0x00480302: 0x0124, - 0x00680302: 0x0125, - 0x00490303: 0x0128, - 0x00690303: 0x0129, - 0x00490304: 0x012A, - 0x00690304: 0x012B, - 0x00490306: 0x012C, - 0x00690306: 0x012D, - 0x00490328: 0x012E, - 0x00690328: 0x012F, - 0x00490307: 0x0130, - 0x004A0302: 0x0134, - 0x006A0302: 0x0135, - 0x004B0327: 0x0136, - 0x006B0327: 0x0137, - 0x004C0301: 0x0139, - 0x006C0301: 0x013A, - 0x004C0327: 0x013B, - 0x006C0327: 0x013C, - 0x004C030C: 0x013D, - 0x006C030C: 0x013E, - 0x004E0301: 0x0143, - 0x006E0301: 0x0144, - 0x004E0327: 0x0145, - 0x006E0327: 0x0146, - 0x004E030C: 0x0147, - 0x006E030C: 0x0148, - 0x004F0304: 0x014C, - 0x006F0304: 0x014D, - 0x004F0306: 0x014E, - 0x006F0306: 0x014F, - 0x004F030B: 0x0150, - 0x006F030B: 0x0151, - 0x00520301: 0x0154, - 0x00720301: 0x0155, - 0x00520327: 0x0156, - 0x00720327: 0x0157, - 0x0052030C: 0x0158, - 0x0072030C: 0x0159, - 0x00530301: 0x015A, - 0x00730301: 0x015B, - 0x00530302: 0x015C, - 0x00730302: 0x015D, - 0x00530327: 0x015E, - 0x00730327: 0x015F, - 0x0053030C: 0x0160, - 0x0073030C: 0x0161, - 0x00540327: 0x0162, - 0x00740327: 0x0163, - 0x0054030C: 0x0164, - 0x0074030C: 0x0165, - 0x00550303: 0x0168, - 0x00750303: 0x0169, - 0x00550304: 0x016A, - 0x00750304: 0x016B, - 0x00550306: 0x016C, - 0x00750306: 0x016D, - 0x0055030A: 0x016E, - 0x0075030A: 0x016F, - 0x0055030B: 0x0170, - 0x0075030B: 0x0171, - 0x00550328: 0x0172, - 0x00750328: 0x0173, - 0x00570302: 0x0174, - 0x00770302: 0x0175, - 0x00590302: 0x0176, - 0x00790302: 0x0177, - 
0x00590308: 0x0178, - 0x005A0301: 0x0179, - 0x007A0301: 0x017A, - 0x005A0307: 0x017B, - 0x007A0307: 0x017C, - 0x005A030C: 0x017D, - 0x007A030C: 0x017E, - 0x004F031B: 0x01A0, - 0x006F031B: 0x01A1, - 0x0055031B: 0x01AF, - 0x0075031B: 0x01B0, - 0x0041030C: 0x01CD, - 0x0061030C: 0x01CE, - 0x0049030C: 0x01CF, - 0x0069030C: 0x01D0, - 0x004F030C: 0x01D1, - 0x006F030C: 0x01D2, - 0x0055030C: 0x01D3, - 0x0075030C: 0x01D4, - 0x00DC0304: 0x01D5, - 0x00FC0304: 0x01D6, - 0x00DC0301: 0x01D7, - 0x00FC0301: 0x01D8, - 0x00DC030C: 0x01D9, - 0x00FC030C: 0x01DA, - 0x00DC0300: 0x01DB, - 0x00FC0300: 0x01DC, - 0x00C40304: 0x01DE, - 0x00E40304: 0x01DF, - 0x02260304: 0x01E0, - 0x02270304: 0x01E1, - 0x00C60304: 0x01E2, - 0x00E60304: 0x01E3, - 0x0047030C: 0x01E6, - 0x0067030C: 0x01E7, - 0x004B030C: 0x01E8, - 0x006B030C: 0x01E9, - 0x004F0328: 0x01EA, - 0x006F0328: 0x01EB, - 0x01EA0304: 0x01EC, - 0x01EB0304: 0x01ED, - 0x01B7030C: 0x01EE, - 0x0292030C: 0x01EF, - 0x006A030C: 0x01F0, - 0x00470301: 0x01F4, - 0x00670301: 0x01F5, - 0x004E0300: 0x01F8, - 0x006E0300: 0x01F9, - 0x00C50301: 0x01FA, - 0x00E50301: 0x01FB, - 0x00C60301: 0x01FC, - 0x00E60301: 0x01FD, - 0x00D80301: 0x01FE, - 0x00F80301: 0x01FF, - 0x0041030F: 0x0200, - 0x0061030F: 0x0201, - 0x00410311: 0x0202, - 0x00610311: 0x0203, - 0x0045030F: 0x0204, - 0x0065030F: 0x0205, - 0x00450311: 0x0206, - 0x00650311: 0x0207, - 0x0049030F: 0x0208, - 0x0069030F: 0x0209, - 0x00490311: 0x020A, - 0x00690311: 0x020B, - 0x004F030F: 0x020C, - 0x006F030F: 0x020D, - 0x004F0311: 0x020E, - 0x006F0311: 0x020F, - 0x0052030F: 0x0210, - 0x0072030F: 0x0211, - 0x00520311: 0x0212, - 0x00720311: 0x0213, - 0x0055030F: 0x0214, - 0x0075030F: 0x0215, - 0x00550311: 0x0216, - 0x00750311: 0x0217, - 0x00530326: 0x0218, - 0x00730326: 0x0219, - 0x00540326: 0x021A, - 0x00740326: 0x021B, - 0x0048030C: 0x021E, - 0x0068030C: 0x021F, - 0x00410307: 0x0226, - 0x00610307: 0x0227, - 0x00450327: 0x0228, - 0x00650327: 0x0229, - 0x00D60304: 0x022A, - 0x00F60304: 0x022B, - 0x00D50304: 0x022C, - 0x00F50304: 0x022D, - 0x004F0307: 0x022E, - 0x006F0307: 0x022F, - 0x022E0304: 0x0230, - 0x022F0304: 0x0231, - 0x00590304: 0x0232, - 0x00790304: 0x0233, - 0x00A80301: 0x0385, - 0x03910301: 0x0386, - 0x03950301: 0x0388, - 0x03970301: 0x0389, - 0x03990301: 0x038A, - 0x039F0301: 0x038C, - 0x03A50301: 0x038E, - 0x03A90301: 0x038F, - 0x03CA0301: 0x0390, - 0x03990308: 0x03AA, - 0x03A50308: 0x03AB, - 0x03B10301: 0x03AC, - 0x03B50301: 0x03AD, - 0x03B70301: 0x03AE, - 0x03B90301: 0x03AF, - 0x03CB0301: 0x03B0, - 0x03B90308: 0x03CA, - 0x03C50308: 0x03CB, - 0x03BF0301: 0x03CC, - 0x03C50301: 0x03CD, - 0x03C90301: 0x03CE, - 0x03D20301: 0x03D3, - 0x03D20308: 0x03D4, - 0x04150300: 0x0400, - 0x04150308: 0x0401, - 0x04130301: 0x0403, - 0x04060308: 0x0407, - 0x041A0301: 0x040C, - 0x04180300: 0x040D, - 0x04230306: 0x040E, - 0x04180306: 0x0419, - 0x04380306: 0x0439, - 0x04350300: 0x0450, - 0x04350308: 0x0451, - 0x04330301: 0x0453, - 0x04560308: 0x0457, - 0x043A0301: 0x045C, - 0x04380300: 0x045D, - 0x04430306: 0x045E, - 0x0474030F: 0x0476, - 0x0475030F: 0x0477, - 0x04160306: 0x04C1, - 0x04360306: 0x04C2, - 0x04100306: 0x04D0, - 0x04300306: 0x04D1, - 0x04100308: 0x04D2, - 0x04300308: 0x04D3, - 0x04150306: 0x04D6, - 0x04350306: 0x04D7, - 0x04D80308: 0x04DA, - 0x04D90308: 0x04DB, - 0x04160308: 0x04DC, - 0x04360308: 0x04DD, - 0x04170308: 0x04DE, - 0x04370308: 0x04DF, - 0x04180304: 0x04E2, - 0x04380304: 0x04E3, - 0x04180308: 0x04E4, - 0x04380308: 0x04E5, - 0x041E0308: 0x04E6, - 0x043E0308: 0x04E7, - 0x04E80308: 0x04EA, - 0x04E90308: 0x04EB, - 0x042D0308: 
0x04EC, - 0x044D0308: 0x04ED, - 0x04230304: 0x04EE, - 0x04430304: 0x04EF, - 0x04230308: 0x04F0, - 0x04430308: 0x04F1, - 0x0423030B: 0x04F2, - 0x0443030B: 0x04F3, - 0x04270308: 0x04F4, - 0x04470308: 0x04F5, - 0x042B0308: 0x04F8, - 0x044B0308: 0x04F9, - 0x06270653: 0x0622, - 0x06270654: 0x0623, - 0x06480654: 0x0624, - 0x06270655: 0x0625, - 0x064A0654: 0x0626, - 0x06D50654: 0x06C0, - 0x06C10654: 0x06C2, - 0x06D20654: 0x06D3, - 0x0928093C: 0x0929, - 0x0930093C: 0x0931, - 0x0933093C: 0x0934, - 0x09C709BE: 0x09CB, - 0x09C709D7: 0x09CC, - 0x0B470B56: 0x0B48, - 0x0B470B3E: 0x0B4B, - 0x0B470B57: 0x0B4C, - 0x0B920BD7: 0x0B94, - 0x0BC60BBE: 0x0BCA, - 0x0BC70BBE: 0x0BCB, - 0x0BC60BD7: 0x0BCC, - 0x0C460C56: 0x0C48, - 0x0CBF0CD5: 0x0CC0, - 0x0CC60CD5: 0x0CC7, - 0x0CC60CD6: 0x0CC8, - 0x0CC60CC2: 0x0CCA, - 0x0CCA0CD5: 0x0CCB, - 0x0D460D3E: 0x0D4A, - 0x0D470D3E: 0x0D4B, - 0x0D460D57: 0x0D4C, - 0x0DD90DCA: 0x0DDA, - 0x0DD90DCF: 0x0DDC, - 0x0DDC0DCA: 0x0DDD, - 0x0DD90DDF: 0x0DDE, - 0x1025102E: 0x1026, - 0x1B051B35: 0x1B06, - 0x1B071B35: 0x1B08, - 0x1B091B35: 0x1B0A, - 0x1B0B1B35: 0x1B0C, - 0x1B0D1B35: 0x1B0E, - 0x1B111B35: 0x1B12, - 0x1B3A1B35: 0x1B3B, - 0x1B3C1B35: 0x1B3D, - 0x1B3E1B35: 0x1B40, - 0x1B3F1B35: 0x1B41, - 0x1B421B35: 0x1B43, - 0x00410325: 0x1E00, - 0x00610325: 0x1E01, - 0x00420307: 0x1E02, - 0x00620307: 0x1E03, - 0x00420323: 0x1E04, - 0x00620323: 0x1E05, - 0x00420331: 0x1E06, - 0x00620331: 0x1E07, - 0x00C70301: 0x1E08, - 0x00E70301: 0x1E09, - 0x00440307: 0x1E0A, - 0x00640307: 0x1E0B, - 0x00440323: 0x1E0C, - 0x00640323: 0x1E0D, - 0x00440331: 0x1E0E, - 0x00640331: 0x1E0F, - 0x00440327: 0x1E10, - 0x00640327: 0x1E11, - 0x0044032D: 0x1E12, - 0x0064032D: 0x1E13, - 0x01120300: 0x1E14, - 0x01130300: 0x1E15, - 0x01120301: 0x1E16, - 0x01130301: 0x1E17, - 0x0045032D: 0x1E18, - 0x0065032D: 0x1E19, - 0x00450330: 0x1E1A, - 0x00650330: 0x1E1B, - 0x02280306: 0x1E1C, - 0x02290306: 0x1E1D, - 0x00460307: 0x1E1E, - 0x00660307: 0x1E1F, - 0x00470304: 0x1E20, - 0x00670304: 0x1E21, - 0x00480307: 0x1E22, - 0x00680307: 0x1E23, - 0x00480323: 0x1E24, - 0x00680323: 0x1E25, - 0x00480308: 0x1E26, - 0x00680308: 0x1E27, - 0x00480327: 0x1E28, - 0x00680327: 0x1E29, - 0x0048032E: 0x1E2A, - 0x0068032E: 0x1E2B, - 0x00490330: 0x1E2C, - 0x00690330: 0x1E2D, - 0x00CF0301: 0x1E2E, - 0x00EF0301: 0x1E2F, - 0x004B0301: 0x1E30, - 0x006B0301: 0x1E31, - 0x004B0323: 0x1E32, - 0x006B0323: 0x1E33, - 0x004B0331: 0x1E34, - 0x006B0331: 0x1E35, - 0x004C0323: 0x1E36, - 0x006C0323: 0x1E37, - 0x1E360304: 0x1E38, - 0x1E370304: 0x1E39, - 0x004C0331: 0x1E3A, - 0x006C0331: 0x1E3B, - 0x004C032D: 0x1E3C, - 0x006C032D: 0x1E3D, - 0x004D0301: 0x1E3E, - 0x006D0301: 0x1E3F, - 0x004D0307: 0x1E40, - 0x006D0307: 0x1E41, - 0x004D0323: 0x1E42, - 0x006D0323: 0x1E43, - 0x004E0307: 0x1E44, - 0x006E0307: 0x1E45, - 0x004E0323: 0x1E46, - 0x006E0323: 0x1E47, - 0x004E0331: 0x1E48, - 0x006E0331: 0x1E49, - 0x004E032D: 0x1E4A, - 0x006E032D: 0x1E4B, - 0x00D50301: 0x1E4C, - 0x00F50301: 0x1E4D, - 0x00D50308: 0x1E4E, - 0x00F50308: 0x1E4F, - 0x014C0300: 0x1E50, - 0x014D0300: 0x1E51, - 0x014C0301: 0x1E52, - 0x014D0301: 0x1E53, - 0x00500301: 0x1E54, - 0x00700301: 0x1E55, - 0x00500307: 0x1E56, - 0x00700307: 0x1E57, - 0x00520307: 0x1E58, - 0x00720307: 0x1E59, - 0x00520323: 0x1E5A, - 0x00720323: 0x1E5B, - 0x1E5A0304: 0x1E5C, - 0x1E5B0304: 0x1E5D, - 0x00520331: 0x1E5E, - 0x00720331: 0x1E5F, - 0x00530307: 0x1E60, - 0x00730307: 0x1E61, - 0x00530323: 0x1E62, - 0x00730323: 0x1E63, - 0x015A0307: 0x1E64, - 0x015B0307: 0x1E65, - 0x01600307: 0x1E66, - 0x01610307: 0x1E67, - 0x1E620307: 0x1E68, - 
0x1E630307: 0x1E69, - 0x00540307: 0x1E6A, - 0x00740307: 0x1E6B, - 0x00540323: 0x1E6C, - 0x00740323: 0x1E6D, - 0x00540331: 0x1E6E, - 0x00740331: 0x1E6F, - 0x0054032D: 0x1E70, - 0x0074032D: 0x1E71, - 0x00550324: 0x1E72, - 0x00750324: 0x1E73, - 0x00550330: 0x1E74, - 0x00750330: 0x1E75, - 0x0055032D: 0x1E76, - 0x0075032D: 0x1E77, - 0x01680301: 0x1E78, - 0x01690301: 0x1E79, - 0x016A0308: 0x1E7A, - 0x016B0308: 0x1E7B, - 0x00560303: 0x1E7C, - 0x00760303: 0x1E7D, - 0x00560323: 0x1E7E, - 0x00760323: 0x1E7F, - 0x00570300: 0x1E80, - 0x00770300: 0x1E81, - 0x00570301: 0x1E82, - 0x00770301: 0x1E83, - 0x00570308: 0x1E84, - 0x00770308: 0x1E85, - 0x00570307: 0x1E86, - 0x00770307: 0x1E87, - 0x00570323: 0x1E88, - 0x00770323: 0x1E89, - 0x00580307: 0x1E8A, - 0x00780307: 0x1E8B, - 0x00580308: 0x1E8C, - 0x00780308: 0x1E8D, - 0x00590307: 0x1E8E, - 0x00790307: 0x1E8F, - 0x005A0302: 0x1E90, - 0x007A0302: 0x1E91, - 0x005A0323: 0x1E92, - 0x007A0323: 0x1E93, - 0x005A0331: 0x1E94, - 0x007A0331: 0x1E95, - 0x00680331: 0x1E96, - 0x00740308: 0x1E97, - 0x0077030A: 0x1E98, - 0x0079030A: 0x1E99, - 0x017F0307: 0x1E9B, - 0x00410323: 0x1EA0, - 0x00610323: 0x1EA1, - 0x00410309: 0x1EA2, - 0x00610309: 0x1EA3, - 0x00C20301: 0x1EA4, - 0x00E20301: 0x1EA5, - 0x00C20300: 0x1EA6, - 0x00E20300: 0x1EA7, - 0x00C20309: 0x1EA8, - 0x00E20309: 0x1EA9, - 0x00C20303: 0x1EAA, - 0x00E20303: 0x1EAB, - 0x1EA00302: 0x1EAC, - 0x1EA10302: 0x1EAD, - 0x01020301: 0x1EAE, - 0x01030301: 0x1EAF, - 0x01020300: 0x1EB0, - 0x01030300: 0x1EB1, - 0x01020309: 0x1EB2, - 0x01030309: 0x1EB3, - 0x01020303: 0x1EB4, - 0x01030303: 0x1EB5, - 0x1EA00306: 0x1EB6, - 0x1EA10306: 0x1EB7, - 0x00450323: 0x1EB8, - 0x00650323: 0x1EB9, - 0x00450309: 0x1EBA, - 0x00650309: 0x1EBB, - 0x00450303: 0x1EBC, - 0x00650303: 0x1EBD, - 0x00CA0301: 0x1EBE, - 0x00EA0301: 0x1EBF, - 0x00CA0300: 0x1EC0, - 0x00EA0300: 0x1EC1, - 0x00CA0309: 0x1EC2, - 0x00EA0309: 0x1EC3, - 0x00CA0303: 0x1EC4, - 0x00EA0303: 0x1EC5, - 0x1EB80302: 0x1EC6, - 0x1EB90302: 0x1EC7, - 0x00490309: 0x1EC8, - 0x00690309: 0x1EC9, - 0x00490323: 0x1ECA, - 0x00690323: 0x1ECB, - 0x004F0323: 0x1ECC, - 0x006F0323: 0x1ECD, - 0x004F0309: 0x1ECE, - 0x006F0309: 0x1ECF, - 0x00D40301: 0x1ED0, - 0x00F40301: 0x1ED1, - 0x00D40300: 0x1ED2, - 0x00F40300: 0x1ED3, - 0x00D40309: 0x1ED4, - 0x00F40309: 0x1ED5, - 0x00D40303: 0x1ED6, - 0x00F40303: 0x1ED7, - 0x1ECC0302: 0x1ED8, - 0x1ECD0302: 0x1ED9, - 0x01A00301: 0x1EDA, - 0x01A10301: 0x1EDB, - 0x01A00300: 0x1EDC, - 0x01A10300: 0x1EDD, - 0x01A00309: 0x1EDE, - 0x01A10309: 0x1EDF, - 0x01A00303: 0x1EE0, - 0x01A10303: 0x1EE1, - 0x01A00323: 0x1EE2, - 0x01A10323: 0x1EE3, - 0x00550323: 0x1EE4, - 0x00750323: 0x1EE5, - 0x00550309: 0x1EE6, - 0x00750309: 0x1EE7, - 0x01AF0301: 0x1EE8, - 0x01B00301: 0x1EE9, - 0x01AF0300: 0x1EEA, - 0x01B00300: 0x1EEB, - 0x01AF0309: 0x1EEC, - 0x01B00309: 0x1EED, - 0x01AF0303: 0x1EEE, - 0x01B00303: 0x1EEF, - 0x01AF0323: 0x1EF0, - 0x01B00323: 0x1EF1, - 0x00590300: 0x1EF2, - 0x00790300: 0x1EF3, - 0x00590323: 0x1EF4, - 0x00790323: 0x1EF5, - 0x00590309: 0x1EF6, - 0x00790309: 0x1EF7, - 0x00590303: 0x1EF8, - 0x00790303: 0x1EF9, - 0x03B10313: 0x1F00, - 0x03B10314: 0x1F01, - 0x1F000300: 0x1F02, - 0x1F010300: 0x1F03, - 0x1F000301: 0x1F04, - 0x1F010301: 0x1F05, - 0x1F000342: 0x1F06, - 0x1F010342: 0x1F07, - 0x03910313: 0x1F08, - 0x03910314: 0x1F09, - 0x1F080300: 0x1F0A, - 0x1F090300: 0x1F0B, - 0x1F080301: 0x1F0C, - 0x1F090301: 0x1F0D, - 0x1F080342: 0x1F0E, - 0x1F090342: 0x1F0F, - 0x03B50313: 0x1F10, - 0x03B50314: 0x1F11, - 0x1F100300: 0x1F12, - 0x1F110300: 0x1F13, - 0x1F100301: 0x1F14, - 0x1F110301: 
0x1F15, - 0x03950313: 0x1F18, - 0x03950314: 0x1F19, - 0x1F180300: 0x1F1A, - 0x1F190300: 0x1F1B, - 0x1F180301: 0x1F1C, - 0x1F190301: 0x1F1D, - 0x03B70313: 0x1F20, - 0x03B70314: 0x1F21, - 0x1F200300: 0x1F22, - 0x1F210300: 0x1F23, - 0x1F200301: 0x1F24, - 0x1F210301: 0x1F25, - 0x1F200342: 0x1F26, - 0x1F210342: 0x1F27, - 0x03970313: 0x1F28, - 0x03970314: 0x1F29, - 0x1F280300: 0x1F2A, - 0x1F290300: 0x1F2B, - 0x1F280301: 0x1F2C, - 0x1F290301: 0x1F2D, - 0x1F280342: 0x1F2E, - 0x1F290342: 0x1F2F, - 0x03B90313: 0x1F30, - 0x03B90314: 0x1F31, - 0x1F300300: 0x1F32, - 0x1F310300: 0x1F33, - 0x1F300301: 0x1F34, - 0x1F310301: 0x1F35, - 0x1F300342: 0x1F36, - 0x1F310342: 0x1F37, - 0x03990313: 0x1F38, - 0x03990314: 0x1F39, - 0x1F380300: 0x1F3A, - 0x1F390300: 0x1F3B, - 0x1F380301: 0x1F3C, - 0x1F390301: 0x1F3D, - 0x1F380342: 0x1F3E, - 0x1F390342: 0x1F3F, - 0x03BF0313: 0x1F40, - 0x03BF0314: 0x1F41, - 0x1F400300: 0x1F42, - 0x1F410300: 0x1F43, - 0x1F400301: 0x1F44, - 0x1F410301: 0x1F45, - 0x039F0313: 0x1F48, - 0x039F0314: 0x1F49, - 0x1F480300: 0x1F4A, - 0x1F490300: 0x1F4B, - 0x1F480301: 0x1F4C, - 0x1F490301: 0x1F4D, - 0x03C50313: 0x1F50, - 0x03C50314: 0x1F51, - 0x1F500300: 0x1F52, - 0x1F510300: 0x1F53, - 0x1F500301: 0x1F54, - 0x1F510301: 0x1F55, - 0x1F500342: 0x1F56, - 0x1F510342: 0x1F57, - 0x03A50314: 0x1F59, - 0x1F590300: 0x1F5B, - 0x1F590301: 0x1F5D, - 0x1F590342: 0x1F5F, - 0x03C90313: 0x1F60, - 0x03C90314: 0x1F61, - 0x1F600300: 0x1F62, - 0x1F610300: 0x1F63, - 0x1F600301: 0x1F64, - 0x1F610301: 0x1F65, - 0x1F600342: 0x1F66, - 0x1F610342: 0x1F67, - 0x03A90313: 0x1F68, - 0x03A90314: 0x1F69, - 0x1F680300: 0x1F6A, - 0x1F690300: 0x1F6B, - 0x1F680301: 0x1F6C, - 0x1F690301: 0x1F6D, - 0x1F680342: 0x1F6E, - 0x1F690342: 0x1F6F, - 0x03B10300: 0x1F70, - 0x03B50300: 0x1F72, - 0x03B70300: 0x1F74, - 0x03B90300: 0x1F76, - 0x03BF0300: 0x1F78, - 0x03C50300: 0x1F7A, - 0x03C90300: 0x1F7C, - 0x1F000345: 0x1F80, - 0x1F010345: 0x1F81, - 0x1F020345: 0x1F82, - 0x1F030345: 0x1F83, - 0x1F040345: 0x1F84, - 0x1F050345: 0x1F85, - 0x1F060345: 0x1F86, - 0x1F070345: 0x1F87, - 0x1F080345: 0x1F88, - 0x1F090345: 0x1F89, - 0x1F0A0345: 0x1F8A, - 0x1F0B0345: 0x1F8B, - 0x1F0C0345: 0x1F8C, - 0x1F0D0345: 0x1F8D, - 0x1F0E0345: 0x1F8E, - 0x1F0F0345: 0x1F8F, - 0x1F200345: 0x1F90, - 0x1F210345: 0x1F91, - 0x1F220345: 0x1F92, - 0x1F230345: 0x1F93, - 0x1F240345: 0x1F94, - 0x1F250345: 0x1F95, - 0x1F260345: 0x1F96, - 0x1F270345: 0x1F97, - 0x1F280345: 0x1F98, - 0x1F290345: 0x1F99, - 0x1F2A0345: 0x1F9A, - 0x1F2B0345: 0x1F9B, - 0x1F2C0345: 0x1F9C, - 0x1F2D0345: 0x1F9D, - 0x1F2E0345: 0x1F9E, - 0x1F2F0345: 0x1F9F, - 0x1F600345: 0x1FA0, - 0x1F610345: 0x1FA1, - 0x1F620345: 0x1FA2, - 0x1F630345: 0x1FA3, - 0x1F640345: 0x1FA4, - 0x1F650345: 0x1FA5, - 0x1F660345: 0x1FA6, - 0x1F670345: 0x1FA7, - 0x1F680345: 0x1FA8, - 0x1F690345: 0x1FA9, - 0x1F6A0345: 0x1FAA, - 0x1F6B0345: 0x1FAB, - 0x1F6C0345: 0x1FAC, - 0x1F6D0345: 0x1FAD, - 0x1F6E0345: 0x1FAE, - 0x1F6F0345: 0x1FAF, - 0x03B10306: 0x1FB0, - 0x03B10304: 0x1FB1, - 0x1F700345: 0x1FB2, - 0x03B10345: 0x1FB3, - 0x03AC0345: 0x1FB4, - 0x03B10342: 0x1FB6, - 0x1FB60345: 0x1FB7, - 0x03910306: 0x1FB8, - 0x03910304: 0x1FB9, - 0x03910300: 0x1FBA, - 0x03910345: 0x1FBC, - 0x00A80342: 0x1FC1, - 0x1F740345: 0x1FC2, - 0x03B70345: 0x1FC3, - 0x03AE0345: 0x1FC4, - 0x03B70342: 0x1FC6, - 0x1FC60345: 0x1FC7, - 0x03950300: 0x1FC8, - 0x03970300: 0x1FCA, - 0x03970345: 0x1FCC, - 0x1FBF0300: 0x1FCD, - 0x1FBF0301: 0x1FCE, - 0x1FBF0342: 0x1FCF, - 0x03B90306: 0x1FD0, - 0x03B90304: 0x1FD1, - 0x03CA0300: 0x1FD2, - 0x03B90342: 0x1FD6, - 0x03CA0342: 0x1FD7, - 
0x03990306: 0x1FD8, - 0x03990304: 0x1FD9, - 0x03990300: 0x1FDA, - 0x1FFE0300: 0x1FDD, - 0x1FFE0301: 0x1FDE, - 0x1FFE0342: 0x1FDF, - 0x03C50306: 0x1FE0, - 0x03C50304: 0x1FE1, - 0x03CB0300: 0x1FE2, - 0x03C10313: 0x1FE4, - 0x03C10314: 0x1FE5, - 0x03C50342: 0x1FE6, - 0x03CB0342: 0x1FE7, - 0x03A50306: 0x1FE8, - 0x03A50304: 0x1FE9, - 0x03A50300: 0x1FEA, - 0x03A10314: 0x1FEC, - 0x00A80300: 0x1FED, - 0x1F7C0345: 0x1FF2, - 0x03C90345: 0x1FF3, - 0x03CE0345: 0x1FF4, - 0x03C90342: 0x1FF6, - 0x1FF60345: 0x1FF7, - 0x039F0300: 0x1FF8, - 0x03A90300: 0x1FFA, - 0x03A90345: 0x1FFC, - 0x21900338: 0x219A, - 0x21920338: 0x219B, - 0x21940338: 0x21AE, - 0x21D00338: 0x21CD, - 0x21D40338: 0x21CE, - 0x21D20338: 0x21CF, - 0x22030338: 0x2204, - 0x22080338: 0x2209, - 0x220B0338: 0x220C, - 0x22230338: 0x2224, - 0x22250338: 0x2226, - 0x223C0338: 0x2241, - 0x22430338: 0x2244, - 0x22450338: 0x2247, - 0x22480338: 0x2249, - 0x003D0338: 0x2260, - 0x22610338: 0x2262, - 0x224D0338: 0x226D, - 0x003C0338: 0x226E, - 0x003E0338: 0x226F, - 0x22640338: 0x2270, - 0x22650338: 0x2271, - 0x22720338: 0x2274, - 0x22730338: 0x2275, - 0x22760338: 0x2278, - 0x22770338: 0x2279, - 0x227A0338: 0x2280, - 0x227B0338: 0x2281, - 0x22820338: 0x2284, - 0x22830338: 0x2285, - 0x22860338: 0x2288, - 0x22870338: 0x2289, - 0x22A20338: 0x22AC, - 0x22A80338: 0x22AD, - 0x22A90338: 0x22AE, - 0x22AB0338: 0x22AF, - 0x227C0338: 0x22E0, - 0x227D0338: 0x22E1, - 0x22910338: 0x22E2, - 0x22920338: 0x22E3, - 0x22B20338: 0x22EA, - 0x22B30338: 0x22EB, - 0x22B40338: 0x22EC, - 0x22B50338: 0x22ED, - 0x304B3099: 0x304C, - 0x304D3099: 0x304E, - 0x304F3099: 0x3050, - 0x30513099: 0x3052, - 0x30533099: 0x3054, - 0x30553099: 0x3056, - 0x30573099: 0x3058, - 0x30593099: 0x305A, - 0x305B3099: 0x305C, - 0x305D3099: 0x305E, - 0x305F3099: 0x3060, - 0x30613099: 0x3062, - 0x30643099: 0x3065, - 0x30663099: 0x3067, - 0x30683099: 0x3069, - 0x306F3099: 0x3070, - 0x306F309A: 0x3071, - 0x30723099: 0x3073, - 0x3072309A: 0x3074, - 0x30753099: 0x3076, - 0x3075309A: 0x3077, - 0x30783099: 0x3079, - 0x3078309A: 0x307A, - 0x307B3099: 0x307C, - 0x307B309A: 0x307D, - 0x30463099: 0x3094, - 0x309D3099: 0x309E, - 0x30AB3099: 0x30AC, - 0x30AD3099: 0x30AE, - 0x30AF3099: 0x30B0, - 0x30B13099: 0x30B2, - 0x30B33099: 0x30B4, - 0x30B53099: 0x30B6, - 0x30B73099: 0x30B8, - 0x30B93099: 0x30BA, - 0x30BB3099: 0x30BC, - 0x30BD3099: 0x30BE, - 0x30BF3099: 0x30C0, - 0x30C13099: 0x30C2, - 0x30C43099: 0x30C5, - 0x30C63099: 0x30C7, - 0x30C83099: 0x30C9, - 0x30CF3099: 0x30D0, - 0x30CF309A: 0x30D1, - 0x30D23099: 0x30D3, - 0x30D2309A: 0x30D4, - 0x30D53099: 0x30D6, - 0x30D5309A: 0x30D7, - 0x30D83099: 0x30D9, - 0x30D8309A: 0x30DA, - 0x30DB3099: 0x30DC, - 0x30DB309A: 0x30DD, - 0x30A63099: 0x30F4, - 0x30EF3099: 0x30F7, - 0x30F03099: 0x30F8, - 0x30F13099: 0x30F9, - 0x30F23099: 0x30FA, - 0x30FD3099: 0x30FE, - 0x109910BA: 0x1109A, - 0x109B10BA: 0x1109C, - 0x10A510BA: 0x110AB, - 0x11311127: 0x1112E, - 0x11321127: 0x1112F, - 0x1347133E: 0x1134B, - 0x13471357: 0x1134C, - 0x14B914BA: 0x114BB, - 0x14B914B0: 0x114BC, - 0x14B914BD: 0x114BE, - 0x15B815AF: 0x115BA, - 0x15B915AF: 0x115BB, -} +var recompMap map[uint32]rune +var recompMapOnce sync.Once -// Total size of tables: 53KB (54006 bytes) +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + 
"\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + 
"\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 53KB (54006 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/transform.go b/vendor/golang.org/x/text/unicode/norm/transform.go index 9f47efba..a1d366ae 100644 --- a/vendor/golang.org/x/text/unicode/norm/transform.go +++ b/vendor/golang.org/x/text/unicode/norm/transform.go @@ -18,7 +18,6 @@ func (Form) Reset() {} // Users should either catch ErrShortDst and allow dst to grow or have dst be at // least of size MaxTransformChunkSize to be guaranteed of progress. 
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - n := 0 // Cap the maximum number of src bytes to check. b := src eof := atEOF @@ -27,13 +26,14 @@ func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) eof = false b = b[:ns] } - i, ok := formTable[f].quickSpan(inputBytes(b), n, len(b), eof) - n += copy(dst[n:], b[n:i]) + i, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), eof) + n := copy(dst, b[:i]) if !ok { nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF) return nDst + n, nSrc + n, err } - if n < len(src) && !atEOF { + + if err == nil && n < len(src) && !atEOF { err = transform.ErrShortSrc } return n, n, err @@ -79,7 +79,7 @@ func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) nSrc += n nDst += n if ok { - if n < rb.nsrc && !atEOF { + if err == nil && n < rb.nsrc && !atEOF { err = transform.ErrShortSrc } return nDst, nSrc, err diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d71190..00000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. - -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - 
p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index bbc1cb9c..a6ec19e1 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -44,6 +44,7 @@ var ( curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id") // Outgoing headers. apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") @@ -494,6 +495,9 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) if ticket == "" { ticket = DefaultTicket() } + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri + } req := &remotepb.Request{ ServiceName: &service, Method: &method, diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index e4e56e28..53108765 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -229,6 +229,10 @@ type decoder struct { mapType reflect.Type terrors []string strict bool + + decodeCount int + aliasCount int + aliasDepth int } var ( @@ -314,7 +318,39 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm return out, false, false } +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + // 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } switch n.kind { case documentNode: return d.document(n, out) @@ -353,7 +389,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) { failf("anchor '%s' value contains itself", n.value) } d.aliases[n] = true + d.aliasDepth++ good = d.unmarshal(n.alias, out) + d.aliasDepth-- delete(d.aliases, n) return good } diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 6c151db6..4120e0c9 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -81,7 +81,7 @@ func resolvableTag(tag string) bool { return false } -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 077fd1dd..570b8ecd 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -906,6 +906,9 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { return true } +// max_flow_level limits the flow_level +const max_flow_level = 10000 + // Increase the flow level and resize the simple key list if needed. func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Reset the simple key on the next level. @@ -913,6 +916,11 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Increase the flow level. parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } return true } @@ -925,6 +933,9 @@ func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { return true } +// max_indents limits the indents stack size +const max_indents = 10000 + // Push the current indentation level to the stack and set the new level // the current column is greater than the indentation level. In this case, // append or insert the specified token into the token queue. @@ -939,6 +950,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml // indentation level. parser.indents = append(parser.indents, parser.indent) parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } // Create a token and insert it into the queue. 
token := yaml_token_t{ diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go similarity index 75% rename from vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go rename to vendor/k8s.io/api/admissionregistration/v1/doc.go index 8a5d1fbb..c3940f09 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,11 +15,12 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io -// Package v1alpha1 is the v1alpha1 version of the API. +// Package v1 is the v1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and validatingWebhookConfiguration is for the +// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io -package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" +package v1 // import "k8s.io/api/admissionregistration/v1" diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go new file mode 100644 index 00000000..1acb6345 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -0,0 +1,3469 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() 
int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo + +func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{7} +} +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m 
*ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.RuleWithOperations") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1.ServiceReference") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhook") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfigurationList") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1.WebhookClientConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto", 
fileDescriptor_aaac5994f79683e8) +} + +var fileDescriptor_aaac5994f79683e8 = []byte{ + // 1104 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0x46, 0x42, + 0x46, 0xc0, 0x6e, 0x13, 0x4a, 0xa9, 0xb8, 0xa0, 0x6c, 0xf8, 0xa3, 0x88, 0xa4, 0x8d, 0x26, 0x6d, + 0x8a, 0x50, 0x0e, 0x1d, 0xaf, 0xc7, 0xf6, 0x10, 0x7b, 0x67, 0x35, 0x33, 0xeb, 0x92, 0x1b, 0x1f, + 0x81, 0xaf, 0x00, 0x9f, 0x82, 0x1b, 0xe2, 0x96, 0x63, 0x8f, 0x39, 0xa0, 0x85, 0x2c, 0x17, 0x0e, + 0x7c, 0x82, 0x9c, 0xd0, 0xcc, 0xae, 0x77, 0xfd, 0x27, 0x09, 0x56, 0x0e, 0x3d, 0xe5, 0xb6, 0xf3, + 0x7b, 0xf3, 0x7e, 0x6f, 0xde, 0xdb, 0xf7, 0xde, 0x0f, 0xec, 0x1c, 0x3d, 0x12, 0x16, 0x65, 0xf6, + 0x51, 0xd0, 0x24, 0xdc, 0x23, 0x92, 0x08, 0x7b, 0x40, 0xbc, 0x16, 0xe3, 0x76, 0x62, 0xc0, 0x3e, + 0xb5, 0x71, 0xab, 0x4f, 0x85, 0xa0, 0xcc, 0xe3, 0xa4, 0x43, 0x85, 0xe4, 0x58, 0x52, 0xe6, 0xd9, + 0x83, 0x75, 0xbb, 0x43, 0x3c, 0xc2, 0xb1, 0x24, 0x2d, 0xcb, 0xe7, 0x4c, 0x32, 0xf8, 0x6e, 0xec, + 0x64, 0x61, 0x9f, 0x5a, 0x17, 0x3a, 0x59, 0x83, 0xf5, 0xb5, 0x8f, 0x3a, 0x54, 0x76, 0x83, 0xa6, + 0xe5, 0xb2, 0xbe, 0xdd, 0x61, 0x1d, 0x66, 0x6b, 0xdf, 0x66, 0xd0, 0xd6, 0x27, 0x7d, 0xd0, 0x5f, + 0x31, 0xe7, 0xda, 0x83, 0xec, 0x21, 0x7d, 0xec, 0x76, 0xa9, 0x47, 0xf8, 0xb1, 0xed, 0x1f, 0x75, + 0x14, 0x20, 0xec, 0x3e, 0x91, 0xf8, 0x82, 0x97, 0xac, 0xd9, 0x97, 0x79, 0xf1, 0xc0, 0x93, 0xb4, + 0x4f, 0xa6, 0x1c, 0x1e, 0xfe, 0x9f, 0x83, 0x70, 0xbb, 0xa4, 0x8f, 0x27, 0xfd, 0xea, 0xbf, 0x2d, + 0x80, 0x95, 0xdd, 0x40, 0x62, 0x49, 0xbd, 0xce, 0x73, 0xd2, 0xec, 0x32, 0x76, 0x04, 0x6b, 0x20, + 0xef, 0xe1, 0x3e, 0xa9, 0x18, 0x35, 0xa3, 0x51, 0x72, 0x16, 0x4f, 0x42, 0x73, 0x2e, 0x0a, 0xcd, + 0xfc, 0x63, 0xdc, 0x27, 0x48, 0x5b, 0x20, 0x07, 0x8b, 0x6e, 0x8f, 0x12, 0x4f, 0x6e, 0x31, 0xaf, + 0x4d, 0x3b, 0x95, 0xf9, 0x9a, 0xd1, 0x28, 0x6f, 0x3c, 0xb2, 0x66, 0xa8, 0x9f, 0x95, 0x44, 0xd9, + 0x1a, 0xf1, 0x77, 0xde, 0x4c, 0x62, 0x2c, 0x8e, 0xa2, 0x68, 0x2c, 0x06, 0x3c, 0x04, 0x05, 0x1e, + 0xf4, 0x88, 0xa8, 0xe4, 0x6a, 0xb9, 0x46, 0x79, 0xe3, 0xd3, 0x99, 0x82, 0xa1, 0xa0, 0x47, 0x9e, + 0x53, 0xd9, 0x7d, 0xe2, 0x93, 0x18, 0x14, 0xce, 0x52, 0x12, 0xab, 0xa0, 0x6c, 0x02, 0xc5, 0xa4, + 0x70, 0x07, 0x2c, 0xb5, 0x31, 0xed, 0x05, 0x9c, 0xec, 0xb1, 0x1e, 0x75, 0x8f, 0x2b, 0x79, 0x9d, + 0xfc, 0x7b, 0x51, 0x68, 0x2e, 0x7d, 0x35, 0x6a, 0x38, 0x0f, 0xcd, 0xd5, 0x31, 0xe0, 0xe9, 0xb1, + 0x4f, 0xd0, 0xb8, 0x33, 0xfc, 0x02, 0x94, 0xfb, 0x58, 0xba, 0xdd, 0x84, 0xab, 0xa4, 0xb9, 0xea, + 0x51, 0x68, 0x96, 0x77, 0x33, 0xf8, 0x3c, 0x34, 0x57, 0x46, 0x8e, 0x9a, 0x67, 0xd4, 0x0d, 0xfe, + 0x00, 0x56, 0x55, 0xb5, 0x85, 0x8f, 0x5d, 0xb2, 0x4f, 0x7a, 0xc4, 0x95, 0x8c, 0x57, 0x0a, 0xba, + 0xd4, 0x1f, 0x8f, 0x64, 0x9f, 0xfe, 0x6f, 0xcb, 0x3f, 0xea, 0x28, 0x40, 0x58, 0xaa, 0xad, 0x54, + 0xfa, 0x3b, 0xb8, 0x49, 0x7a, 0x43, 0x57, 0xe7, 0xad, 0x28, 0x34, 0x57, 0x1f, 0x4f, 0x32, 0xa2, + 0xe9, 0x20, 0x90, 0x81, 0x65, 0xd6, 0xfc, 0x9e, 0xb8, 0x32, 0x0d, 0x5b, 0xbe, 0x7e, 0x58, 0x18, + 0x85, 0xe6, 0xf2, 0x93, 0x31, 0x3a, 0x34, 0x41, 0xaf, 0x0a, 0x26, 0x68, 0x8b, 0x7c, 0xd9, 0x6e, + 0x13, 0x57, 0x8a, 0xca, 0xad, 0xac, 0x60, 0xfb, 0x19, 0xac, 0x0a, 0x96, 0x1d, 0xb7, 0x7a, 0x58, + 0x08, 0x34, 0xea, 0x06, 0x3f, 0x03, 0xcb, 0xaa, 0xd7, 0x59, 0x20, 0xf7, 0x89, 0xcb, 0xbc, 0x96, + 0xa8, 0x2c, 0xd4, 0x8c, 0x46, 0x21, 0x7e, 0xc1, 0xd3, 0x31, 0x0b, 0x9a, 0xb8, 0x09, 0x9f, 0x81, + 0xbb, 0x69, 0x17, 0x21, 0x32, 0xa0, 0xe4, 0xe5, 0x01, 0xe1, 0xea, 0x20, 0x2a, 0xc5, 
0x5a, 0xae, + 0x51, 0x72, 0xde, 0x89, 0x42, 0xf3, 0xee, 0xe6, 0xc5, 0x57, 0xd0, 0x65, 0xbe, 0xf0, 0x05, 0x80, + 0x9c, 0x50, 0x6f, 0xc0, 0x5c, 0xdd, 0x7e, 0x49, 0x43, 0x00, 0x9d, 0xdf, 0xfd, 0x28, 0x34, 0x21, + 0x9a, 0xb2, 0x9e, 0x87, 0xe6, 0x9d, 0x69, 0x54, 0xb7, 0xc7, 0x05, 0x5c, 0xf5, 0x53, 0x03, 0xdc, + 0x9b, 0x98, 0xe0, 0x78, 0x62, 0x82, 0xb8, 0xe3, 0xe1, 0x0b, 0x50, 0x54, 0x3f, 0xa6, 0x85, 0x25, + 0xd6, 0x23, 0x5d, 0xde, 0xb8, 0x3f, 0xdb, 0x6f, 0x8c, 0xff, 0xd9, 0x2e, 0x91, 0xd8, 0x81, 0xc9, + 0xd0, 0x80, 0x0c, 0x43, 0x29, 0x2b, 0x3c, 0x00, 0xc5, 0x24, 0xb2, 0xa8, 0xcc, 0xeb, 0xe9, 0x7c, + 0x30, 0xd3, 0x74, 0x4e, 0x3c, 0xdb, 0xc9, 0xab, 0x28, 0xa8, 0xf8, 0x32, 0xe1, 0xaa, 0xff, 0x63, + 0x80, 0xda, 0x55, 0xa9, 0xed, 0x50, 0x21, 0xe1, 0xe1, 0x54, 0x7a, 0xd6, 0x8c, 0x5d, 0x4a, 0x45, + 0x9c, 0xdc, 0xed, 0x24, 0xb9, 0xe2, 0x10, 0x19, 0x49, 0xad, 0x0d, 0x0a, 0x54, 0x92, 0xfe, 0x30, + 0xaf, 0xcd, 0xeb, 0xe4, 0x35, 0xf6, 0xe6, 0x6c, 0xff, 0x6c, 0x2b, 0x5e, 0x14, 0xd3, 0xd7, 0x7f, + 0x37, 0x40, 0x5e, 0x2d, 0x24, 0xf8, 0x01, 0x28, 0x61, 0x9f, 0x7e, 0xcd, 0x59, 0xe0, 0x8b, 0x8a, + 0xa1, 0x3b, 0x6f, 0x29, 0x0a, 0xcd, 0xd2, 0xe6, 0xde, 0x76, 0x0c, 0xa2, 0xcc, 0x0e, 0xd7, 0x41, + 0x19, 0xfb, 0x34, 0x6d, 0xd4, 0x79, 0x7d, 0x7d, 0x45, 0x8d, 0xcd, 0xe6, 0xde, 0x76, 0xda, 0x9c, + 0xa3, 0x77, 0x14, 0x3f, 0x27, 0x82, 0x05, 0xdc, 0x4d, 0x56, 0x69, 0xc2, 0x8f, 0x86, 0x20, 0xca, + 0xec, 0xf0, 0x43, 0x50, 0x10, 0x2e, 0xf3, 0x49, 0xb2, 0x0d, 0xef, 0xa8, 0x67, 0xef, 0x2b, 0xe0, + 0x3c, 0x34, 0x4b, 0xfa, 0x43, 0xb7, 0x65, 0x7c, 0xa9, 0xfe, 0x8b, 0x01, 0xe0, 0xf4, 0xc2, 0x85, + 0x9f, 0x03, 0xc0, 0xd2, 0x53, 0x92, 0x92, 0xa9, 0x7b, 0x29, 0x45, 0xcf, 0x43, 0x73, 0x29, 0x3d, + 0x69, 0xca, 0x11, 0x17, 0xf8, 0x0d, 0xc8, 0xab, 0x25, 0x9d, 0xa8, 0xcc, 0xfb, 0x33, 0x2f, 0xfe, + 0x4c, 0xba, 0xd4, 0x09, 0x69, 0x92, 0xfa, 0xcf, 0x06, 0xb8, 0xbd, 0x4f, 0xf8, 0x80, 0xba, 0x04, + 0x91, 0x36, 0xe1, 0xc4, 0x73, 0x09, 0xb4, 0x41, 0x29, 0x5d, 0x82, 0x89, 0xec, 0xad, 0x26, 0xbe, + 0xa5, 0x74, 0x61, 0xa2, 0xec, 0x4e, 0x2a, 0x91, 0xf3, 0x97, 0x4a, 0xe4, 0x3d, 0x90, 0xf7, 0xb1, + 0xec, 0x56, 0x72, 0xfa, 0x46, 0x51, 0x59, 0xf7, 0xb0, 0xec, 0x22, 0x8d, 0x6a, 0x2b, 0xe3, 0x52, + 0xd7, 0xb5, 0x90, 0x58, 0x19, 0x97, 0x48, 0xa3, 0xf5, 0x3f, 0x6f, 0x81, 0xd5, 0x03, 0xdc, 0xa3, + 0xad, 0x1b, 0x59, 0xbe, 0x91, 0xe5, 0x2b, 0x65, 0x19, 0xdc, 0xc8, 0xf2, 0x75, 0x64, 0xb9, 0xfe, + 0x87, 0x01, 0xaa, 0x53, 0x13, 0xf6, 0xba, 0x65, 0xf3, 0xdb, 0x29, 0xd9, 0x7c, 0x38, 0xd3, 0xf4, + 0x4c, 0x3d, 0x7c, 0x4a, 0x38, 0xff, 0x35, 0x40, 0xfd, 0xea, 0xf4, 0x5e, 0x83, 0x74, 0x76, 0xc7, + 0xa5, 0x73, 0xeb, 0x7a, 0xb9, 0xcd, 0x22, 0x9e, 0xbf, 0x1a, 0xe0, 0x8d, 0x0b, 0xf6, 0x17, 0x7c, + 0x1b, 0xe4, 0x02, 0xde, 0x4b, 0x56, 0xf0, 0x42, 0x14, 0x9a, 0xb9, 0x67, 0x68, 0x07, 0x29, 0x0c, + 0x1e, 0x82, 0x05, 0x11, 0xab, 0x40, 0x92, 0xf9, 0x27, 0x33, 0x3d, 0x6f, 0x52, 0x39, 0x9c, 0x72, + 0x14, 0x9a, 0x0b, 0x43, 0x74, 0x48, 0x09, 0x1b, 0xa0, 0xe8, 0x62, 0x27, 0xf0, 0x5a, 0x89, 0x6a, + 0x2d, 0x3a, 0x8b, 0xaa, 0x48, 0x5b, 0x9b, 0x31, 0x86, 0x52, 0xab, 0xd3, 0x38, 0x39, 0xab, 0xce, + 0xbd, 0x3a, 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8c, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, + 0x45, 0x55, 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x45, 0x55, 0xe3, 0xa7, 0xbf, 0xab, 0x73, 0xdf, 0xcd, + 0x0f, 0xd6, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x57, 0x91, 0x2c, 0x7b, 0xeb, 0x0e, 0x00, 0x00, +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIVersions) > 0 { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) 
{ + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} 
+ +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = 
m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, 
``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := 
strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := 
m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto new file mode 100644 index 00000000..f102f3a7 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -0,0 +1,479 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +message MutatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. 
+ // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. 
+ // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + repeated string admissionReviewVersions = 8; + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + optional string reinvocationPolicy = 10; +} + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated MutatingWebhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. 
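(For reference, the Unmarshal methods in the generated.pb.go hunk above all repeat the same hand-inlined base-128 varint loop before reading each field. A minimal standalone sketch of that loop follows; it is illustrative only, not part of the vendored file, and the function name is made up.)

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint mirrors the loop used throughout the generated Unmarshal
// methods: it reads 7 bits per byte, least-significant group first, until it
// sees a byte with the high bit clear, and returns the decoded value plus the
// number of bytes consumed.
func decodeUvarint(data []byte) (uint64, int, error) {
	var v uint64
	for shift, i := uint(0), 0; ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02 in base-128 varint form.
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
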
+message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + optional string scope = 4; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +message ValidatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. 
+ optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. 
objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + repeated string admissionReviewVersions = 8; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ValidatingWebhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. 
+ // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1/register.go similarity index 87% rename from vendor/k8s.io/api/admissionregistration/v1alpha1/register.go rename to vendor/k8s.io/api/admissionregistration/v1/register.go index e9a4164c..716ce7fc 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,7 +25,7 @@ import ( const GroupName = "admissionregistration.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -43,8 +43,10 @@ var ( // Adds the list of known types to scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &InitializerConfiguration{}, - &InitializerConfigurationList{}, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go new file mode 100644 index 00000000..114a4c68 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -0,0 +1,551 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` +} + +type ScopeType string + +const ( + // ClusterScope means that scope is limited to cluster-scoped objects. + // Namespace objects are cluster-scoped. + ClusterScope ScopeType = "Cluster" + // NamespacedScope means that scope is limited to namespaced objects. + NamespacedScope ScopeType = "Namespaced" + // AllScopes means that all scopes are included. 
+ AllScopes ScopeType = "*" +) + +type FailurePolicyType string + +const ( + // Ignore means that an error calling the webhook is ignored. + Ignore FailurePolicyType = "Ignore" + // Fail means that an error calling the webhook causes the admission to fail. + Fail FailurePolicyType = "Fail" +) + +// MatchPolicyType specifies the type of match policy +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + +type SideEffectClass string + +const ( + // SideEffectClassUnknown means that no information is known about the side effects of calling the webhook. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassUnknown SideEffectClass = "Unknown" + // SideEffectClassNone means that calling the webhook will have no side effects. + SideEffectClassNone SideEffectClass = "None" + // SideEffectClassSome means that calling the webhook will possibly have side effects. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassSome SideEffectClass = "Some" + // SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the + // request being reviewed has the dry-run attribute, the side effects will be suppressed. + SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. 
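(The register.go change above replaces the old v1alpha1 Initializer types with the four webhook configuration types defined in this package. Consumer code usually wires them into a runtime.Scheme through the package's exported AddToScheme helper; the SchemeBuilder/AddToScheme variables are not visible in this excerpt, so treat the following as a sketch under that assumption.)

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is assumed to be the conventional SchemeBuilder-backed
	// helper that invokes the addKnownTypes function shown in register.go.
	if err := admissionregistrationv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now knows the v1 webhook configuration kinds.
	for kind := range scheme.KnownTypes(admissionregistrationv1.SchemeGroupVersion) {
		fmt.Println(kind)
	}
}
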
+type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. + Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +type ValidatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + SideEffects *SideEffectClass `json:"sideEffects" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. 
+ // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` +} + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +type MutatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. 
+ // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + SideEffects *SideEffectClass `json:"sideEffects" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. 
API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// ReinvocationPolicyType specifies what type of policy the admission hook uses. +type ReinvocationPolicyType string + +const ( + // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a + // single admission evaluation. + NeverReinvocationPolicy ReinvocationPolicyType = "Never" + // IfNeededReinvocationPolicy indicates that the webhook may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial webhook call. + IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" +) + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. 
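(Putting the Go types above together, a MutatingWebhookConfiguration literal built from this vendored package might look roughly like the sketch below. The webhook name, service coordinates, and rule values are hypothetical placeholders, not anything defined in this patch.)

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleMutatingWebhookConfiguration() *admissionregistrationv1.MutatingWebhookConfiguration {
	failurePolicy := admissionregistrationv1.Fail
	sideEffects := admissionregistrationv1.SideEffectClassNone
	reinvocation := admissionregistrationv1.NeverReinvocationPolicy
	timeout := int32(10)
	path := "/mutate" // hypothetical service path

	return &admissionregistrationv1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-config"}, // hypothetical name
		Webhooks: []admissionregistrationv1.MutatingWebhook{{
			// Webhook names must be fully qualified; this one is made up.
			Name: "pods.example.com",
			ClientConfig: admissionregistrationv1.WebhookClientConfig{
				Service: &admissionregistrationv1.ServiceReference{
					Namespace: "default",         // hypothetical
					Name:      "example-webhook", // hypothetical
					Path:      &path,
				},
			},
			Rules: []admissionregistrationv1.RuleWithOperations{{
				Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create},
				Rule: admissionregistrationv1.Rule{
					APIGroups:   []string{""},
					APIVersions: []string{"v1"},
					Resources:   []string{"pods"},
				},
			}},
			FailurePolicy:           &failurePolicy,
			SideEffects:             &sideEffects,
			TimeoutSeconds:          &timeout,
			AdmissionReviewVersions: []string{"v1"},
			ReinvocationPolicy:      &reinvocation,
		}},
	}
}

func main() {
	cfg := exampleMutatingWebhookConfiguration()
	fmt.Println(cfg.Webhooks[0].Name)
}
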
+const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go new file mode 100644 index 00000000..2fde0ce3 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhook = map[string]string{ + "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. 
* if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", +} + +func (MutatingWebhook) SwaggerDoc() map[string]string { + return map_MutatingWebhook +} + +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. 
Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhook = map[string]string{ + "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. 
If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", +} + +func (ValidatingWebhook) SwaggerDoc() map[string]string { + return map_ValidatingWebhook +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..3afb7467 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -0,0 +1,396 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(ReinvocationPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. +func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { + if in == nil { + return nil + } + out := new(MutatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]MutatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
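// Illustrative sketch only, not part of the vendored file: the generated DeepCopy
// helpers return fully independent copies, so mutating the result of DeepCopy never
// aliases the original's pointers or slices. The helper name and the field values
// below are hypothetical.
func exampleDeepCopyIsIndependent(cfg *MutatingWebhookConfiguration) {
	copied := cfg.DeepCopy()
	if len(copied.Webhooks) > 0 && copied.Webhooks[0].ClientConfig.Service != nil {
		// Only the copy changes; cfg.Webhooks[0].ClientConfig.Service is left untouched.
		copied.Webhooks[0].ClientConfig.Service.Namespace = "kube-system"
	}
}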
+func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. +func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. 
+func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. +func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { + if in == nil { + return nil + } + out := new(ValidatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]ValidatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go deleted file mode 100644 index b87f74e5..00000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1027 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto - - It has these top-level messages: - Initializer - InitializerConfiguration - InitializerConfigurationList - Rule -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *Initializer) Reset() { *m = Initializer{} } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } -func (*InitializerConfiguration) ProtoMessage() {} -func (*InitializerConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } -func (*InitializerConfigurationList) ProtoMessage() {} -func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func init() { - proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") - proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") - proto.RegisterType((*InitializerConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") - proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") -} -func (m *Initializer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *InitializerConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Initializers) > 0 { - for _, msg := range m.Initializers { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *InitializerConfigurationList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Rule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Rule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Initializer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *InitializerConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Initializers) > 0 { - for _, e := range m.Initializers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *InitializerConfigurationList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Rule) Size() (n int) { - var l int - _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n 
int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Initializer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *InitializerConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InitializerConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Initializers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *InitializerConfigurationList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InitializerConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "InitializerConfiguration", "InitializerConfiguration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Rule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Initializer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, Rule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InitializerConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InitializerConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Initializers = append(m.Initializers, Initializer{}) - if err := m.Initializers[len(m.Initializers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
InitializerConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InitializerConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, InitializerConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Rule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 531 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 
0x13, 0x31, - 0x18, 0x6e, 0x6c, 0x0b, 0x6d, 0xda, 0x45, 0x19, 0x3c, 0x94, 0x22, 0xd3, 0xd2, 0x53, 0x45, 0x4c, - 0xec, 0x22, 0x8b, 0xd7, 0x9d, 0x3d, 0x48, 0xc1, 0x8f, 0x25, 0x88, 0x07, 0xf1, 0x60, 0xda, 0xbe, - 0x3b, 0x8d, 0xed, 0x4c, 0x86, 0x24, 0x53, 0xd0, 0x93, 0x17, 0xef, 0x82, 0x7f, 0xaa, 0xc7, 0x3d, - 0xee, 0xa9, 0xd8, 0x11, 0x3c, 0xfa, 0x1b, 0x24, 0x33, 0x9d, 0x9d, 0x59, 0xeb, 0xe2, 0xea, 0x2d, - 0xef, 0xf3, 0xe6, 0xf9, 0x4a, 0x30, 0x5b, 0x3c, 0xd1, 0x44, 0x48, 0xba, 0x88, 0x27, 0xa0, 0x42, - 0x30, 0xa0, 0xe9, 0x0a, 0xc2, 0x99, 0x54, 0x74, 0xb7, 0xe0, 0x91, 0xa0, 0x7c, 0x16, 0x08, 0xad, - 0x85, 0x0c, 0x15, 0xf8, 0x42, 0x1b, 0xc5, 0x8d, 0x90, 0x21, 0x5d, 0x8d, 0xf8, 0x32, 0x9a, 0xf3, - 0x11, 0xf5, 0x21, 0x04, 0xc5, 0x0d, 0xcc, 0x48, 0xa4, 0xa4, 0x91, 0xce, 0xfd, 0x8c, 0x4a, 0x78, - 0x24, 0xc8, 0x1f, 0xa9, 0x24, 0xa7, 0x76, 0x1f, 0xfa, 0xc2, 0xcc, 0xe3, 0x09, 0x99, 0xca, 0x80, - 0xfa, 0xd2, 0x97, 0x34, 0x55, 0x98, 0xc4, 0x67, 0xe9, 0x94, 0x0e, 0xe9, 0x29, 0x53, 0xee, 0x3e, - 0x2e, 0x42, 0x05, 0x7c, 0x3a, 0x17, 0x21, 0xa8, 0x0f, 0x34, 0x5a, 0xf8, 0x16, 0xd0, 0x34, 0x00, - 0xc3, 0xe9, 0x6a, 0x2f, 0x4f, 0x97, 0x5e, 0xc7, 0x52, 0x71, 0x68, 0x44, 0x00, 0x7b, 0x84, 0xa3, - 0xbf, 0x11, 0xf4, 0x74, 0x0e, 0x01, 0xff, 0x9d, 0x37, 0xf8, 0x8c, 0x70, 0x6b, 0x1c, 0x0a, 0x23, - 0xf8, 0x52, 0x7c, 0x04, 0xe5, 0xf4, 0x71, 0x2d, 0xe4, 0x01, 0x74, 0x50, 0x1f, 0x0d, 0x9b, 0x5e, - 0x7b, 0xbd, 0xe9, 0x55, 0x92, 0x4d, 0xaf, 0xf6, 0x82, 0x07, 0xc0, 0xd2, 0x8d, 0xf3, 0x0a, 0xd7, - 0x55, 0xbc, 0x04, 0xdd, 0xb9, 0xd5, 0xaf, 0x0e, 0x5b, 0x87, 0x94, 0xdc, 0xf8, 0xe9, 0x08, 0x8b, - 0x97, 0xe0, 0x1d, 0xec, 0x34, 0xeb, 0x76, 0xd2, 0x2c, 0x13, 0x1b, 0xfc, 0x44, 0xb8, 0x53, 0xca, - 0x71, 0x22, 0xc3, 0x33, 0xe1, 0xc7, 0x99, 0x80, 0xf3, 0x0e, 0x37, 0xec, 0x43, 0xcd, 0xb8, 0xe1, - 0x69, 0xb0, 0xd6, 0xe1, 0xa3, 0x92, 0xeb, 0x65, 0x5f, 0x12, 0x2d, 0x7c, 0x0b, 0x68, 0x62, 0x6f, - 0x93, 0xd5, 0x88, 0xbc, 0x9c, 0xbc, 0x87, 0xa9, 0x79, 0x0e, 0x86, 0x7b, 0xce, 0xce, 0x16, 0x17, - 0x18, 0xbb, 0x54, 0x75, 0x22, 0xdc, 0x16, 0x85, 0x7b, 0xde, 0xed, 0xe8, 0x1f, 0xba, 0x95, 0xc2, - 0x7b, 0x77, 0x77, 0x5e, 0xed, 0x12, 0xa8, 0xd9, 0x15, 0x87, 0xc1, 0x0f, 0x84, 0xef, 0x5d, 0x57, - 0xf8, 0x99, 0xd0, 0xc6, 0x79, 0xbb, 0x57, 0x9a, 0xdc, 0xac, 0xb4, 0x65, 0xa7, 0x95, 0xef, 0xec, - 0x62, 0x34, 0x72, 0xa4, 0x54, 0x78, 0x8e, 0xeb, 0xc2, 0x40, 0x90, 0x37, 0x3d, 0xf9, 0xbf, 0xa6, - 0x57, 0x52, 0x17, 0x3f, 0x3b, 0xb6, 0xca, 0x2c, 0x33, 0x18, 0x7c, 0x45, 0xb8, 0x66, 0xbf, 0xda, - 0x79, 0x80, 0x9b, 0x3c, 0x12, 0x4f, 0x95, 0x8c, 0x23, 0xdd, 0x41, 0xfd, 0xea, 0xb0, 0xe9, 0x1d, - 0x24, 0x9b, 0x5e, 0xf3, 0xf8, 0x74, 0x9c, 0x81, 0xac, 0xd8, 0x3b, 0x23, 0xdc, 0xe2, 0x91, 0x78, - 0x0d, 0xca, 0xe6, 0xc8, 0x52, 0x36, 0xbd, 0xdb, 0xc9, 0xa6, 0xd7, 0x3a, 0x3e, 0x1d, 0xe7, 0x30, - 0x2b, 0xdf, 0xb1, 0xfa, 0x0a, 0xb4, 0x8c, 0xd5, 0x14, 0x74, 0xa7, 0x5a, 0xe8, 0xb3, 0x1c, 0x64, - 0xc5, 0xde, 0x23, 0xeb, 0xad, 0x5b, 0x39, 0xdf, 0xba, 0x95, 0x8b, 0xad, 0x5b, 0xf9, 0x94, 0xb8, - 0x68, 0x9d, 0xb8, 0xe8, 0x3c, 0x71, 0xd1, 0x45, 0xe2, 0xa2, 0x6f, 0x89, 0x8b, 0xbe, 0x7c, 0x77, - 0x2b, 0x6f, 0x1a, 0x79, 0xe9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x06, 0xa3, 0xcb, 0x75, - 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto deleted file mode 100644 index e17b5596..00000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.admissionregistration.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// Initializer describes the name and the failure policy of an initializer, and -// what resources it applies to. -message Initializer { - // Name is the identifier of the initializer. It will be added to the - // object that needs to be initialized. - // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where - // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required - optional string name = 1; - - // Rules describes what resources/subresources the initializer cares about. - // The initializer cares about an operation if it matches _any_ Rule. - // Rule.Resources must not include subresources. - repeated Rule rules = 2; -} - -// InitializerConfiguration describes the configuration of initializers. -message InitializerConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Initializers is a list of resources and their default initializers - // Order-sensitive. - // When merging multiple InitializerConfigurations, we sort the initializers - // from different InitializerConfigurations by the name of the - // InitializerConfigurations; the order of the initializers from the same - // InitializerConfiguration is preserved. - // +patchMergeKey=name - // +patchStrategy=merge - // +optional - repeated Initializer initializers = 2; -} - -// InitializerConfigurationList is a list of InitializerConfiguration. -message InitializerConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of InitializerConfiguration. - repeated InitializerConfiguration items = 2; -} - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -message Rule { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string apiGroups = 1; - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string apiVersions = 2; - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. 
- // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - repeated string resources = 3; -} - diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go deleted file mode 100644 index a245f1e8..00000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitializerConfiguration describes the configuration of initializers. -type InitializerConfiguration struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Initializers is a list of resources and their default initializers - // Order-sensitive. - // When merging multiple InitializerConfigurations, we sort the initializers - // from different InitializerConfigurations by the name of the - // InitializerConfigurations; the order of the initializers from the same - // InitializerConfiguration is preserved. - // +patchMergeKey=name - // +patchStrategy=merge - // +optional - Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitializerConfigurationList is a list of InitializerConfiguration. -type InitializerConfigurationList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of InitializerConfiguration. - Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Initializer describes the name and the failure policy of an initializer, and -// what resources it applies to. -type Initializer struct { - // Name is the identifier of the initializer. It will be added to the - // object that needs to be initialized. - // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where - // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name - // of the organization. 
- // Required - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // Rules describes what resources/subresources the initializer cares about. - // The initializer cares about an operation if it matches _any_ Rule. - // Rule.Resources must not include subresources. - Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"` -} - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -type Rule struct { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 69e4b7c6..00000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Initializer = map[string]string{ - "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", - "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. 
Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", - "rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.", -} - -func (Initializer) SwaggerDoc() map[string]string { - return map_Initializer -} - -var map_InitializerConfiguration = map[string]string{ - "": "InitializerConfiguration describes the configuration of initializers.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "initializers": "Initializers is a list of resources and their default initializers Order-sensitive. When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.", -} - -func (InitializerConfiguration) SwaggerDoc() map[string]string { - return map_InitializerConfiguration -} - -var map_InitializerConfigurationList = map[string]string{ - "": "InitializerConfigurationList is a list of InitializerConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of InitializerConfiguration.", -} - -func (InitializerConfigurationList) SwaggerDoc() map[string]string { - return map_InitializerConfigurationList -} - -var map_Rule = map[string]string{ - "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", - "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", - "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", - "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", -} - -func (Rule) SwaggerDoc() map[string]string { - return map_Rule -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go index afbb3d6d..0a40726f 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -15,11 +15,12 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io // Package v1beta1 is the v1beta1 version of the API. 
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and validatingWebhookConfiguration is for the +// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index d6c9d958..d84d8b63 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -14,39 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto - - It has these top-level messages: - MutatingWebhookConfiguration - MutatingWebhookConfigurationList - Rule - RuleWithOperations - ServiceReference - ValidatingWebhookConfiguration - ValidatingWebhookConfigurationList - Webhook - WebhookClientConfig -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -59,141 +44,593 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo + func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_abeea74cbc46f55a, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) } +var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_abeea74cbc46f55a, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) } -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func 
(*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_abeea74cbc46f55a, []int{7} +} +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m *ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() } +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_abeea74cbc46f55a, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) } -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), 
"k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") - proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") } -func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) +} + +var fileDescriptor_abeea74cbc46f55a = []byte{ + // 1114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x23, 0x45, + 0x13, 0xce, 0xc4, 0xf6, 0xda, 0x6e, 0x27, 0xbb, 0x9b, 0x7e, 0x5f, 0x76, 0x4d, 0x58, 0x79, 0x2c, + 0x1f, 0x90, 0x25, 0xd8, 0x99, 0x4d, 0x40, 0x08, 0x16, 0x10, 0x8a, 0x03, 0x0b, 0x91, 0x92, 0xdd, + 0xd0, 0xd9, 0x0f, 0x89, 0x0f, 0x69, 0xdb, 0xe3, 0xb2, 0xdd, 0xd8, 0x9e, 0x1e, 0x4d, 0xf7, 0x38, + 0xe4, 0xc6, 0x4f, 0xe0, 0x2f, 0x70, 0xe2, 0x57, 0x70, 0xe0, 0x16, 0x6e, 0x7b, 0xdc, 0x0b, 0x23, + 0x32, 0x9c, 0x38, 0x70, 0xe0, 0x9a, 0x13, 0x9a, 0x9e, 0xf6, 0xf8, 0x2b, 0x59, 0x4c, 0x90, 0xf6, + 0x94, 0xdb, 0xf4, 0x53, 0x5d, 0x4f, 0x75, 0xd5, 0x54, 0xd5, 0x83, 0x3e, 0xef, 0xbd, 0x2b, 0x2c, + 0xc6, 0xed, 0x5e, 0xd0, 0x04, 0xdf, 0x05, 0x09, 0xc2, 0x1e, 0x82, 0xdb, 0xe2, 0xbe, 0xad, 0x0d, + 0xd4, 0x63, 0x36, 0x6d, 0x0d, 0x98, 0x10, 0x8c, 0xbb, 0x3e, 0x74, 0x98, 0x90, 0x3e, 0x95, 0x8c, + 0xbb, 0xf6, 0x70, 0xa3, 0x09, 0x92, 0x6e, 0xd8, 0x1d, 0x70, 0xc1, 0xa7, 0x12, 0x5a, 0x96, 0xe7, + 0x73, 0xc9, 0x71, 0x3d, 0xf1, 0xb4, 0xa8, 0xc7, 0xac, 0x33, 0x3d, 0x2d, 0xed, 0xb9, 0x7e, 0xbb, + 0xc3, 0x64, 0x37, 0x68, 0x5a, 0x0e, 0x1f, 0xd8, 0x1d, 0xde, 0xe1, 0xb6, 0x22, 0x68, 0x06, 0x6d, + 0x75, 0x52, 0x07, 0xf5, 0x95, 0x10, 0xaf, 0xbf, 0x3d, 0x7e, 0xd2, 0x80, 0x3a, 0x5d, 0xe6, 0x82, + 0x7f, 0x64, 0x7b, 0xbd, 0x4e, 0x0c, 0x08, 0x7b, 0x00, 0x92, 0xda, 0xc3, 0xb9, 0xe7, 0xac, 0xdb, + 0xe7, 0x79, 0xf9, 0x81, 0x2b, 0xd9, 0x00, 0xe6, 0x1c, 0xde, 0xf9, 0x27, 0x07, 0xe1, 0x74, 0x61, + 0x40, 0x67, 0xfd, 0x6a, 0xbf, 0xe4, 0xd1, 0xb5, 0xbd, 0x40, 0x52, 0xc9, 0xdc, 0xce, 0x13, 0x68, + 0x76, 0x39, 0xef, 0xe1, 0x2a, 0xca, 0xba, 0x74, 0x00, 0x65, 0xa3, 0x6a, 0xd4, 0x8b, 0x8d, 0x95, + 0xe3, 0xd0, 0x5c, 0x8a, 0x42, 0x33, 0x7b, 0x9f, 0x0e, 0x80, 0x28, 0x0b, 0x3e, 0x44, 0x2b, 0x4e, + 0x9f, 0x81, 0x2b, 0xb7, 0xb9, 0xdb, 0x66, 0x9d, 0xf2, 0x72, 0xd5, 0xa8, 0x97, 0x36, 0x3f, 0xb4, + 0x16, 0x2d, 0xa2, 0xa5, 0x43, 0x6d, 0x4f, 0x90, 0x34, 0xfe, 0xaf, 0x03, 0xad, 0x4c, 0xa2, 0x64, + 0x2a, 0x10, 0xa6, 0x28, 0xe7, 0x07, 0x7d, 0x10, 0xe5, 0x4c, 0x35, 0x53, 0x2f, 0x6d, 0x7e, 0xb0, + 0x78, 0x44, 0x12, 0xf4, 0xe1, 0x09, 0x93, 0xdd, 0x07, 0x1e, 
0x24, 0x16, 0xd1, 0x58, 0xd5, 0x01, + 0x73, 0xb1, 0x4d, 0x90, 0x84, 0x19, 0xef, 0xa2, 0xd5, 0x36, 0x65, 0xfd, 0xc0, 0x87, 0x7d, 0xde, + 0x67, 0xce, 0x51, 0x39, 0xab, 0xca, 0xf0, 0x7a, 0x14, 0x9a, 0xab, 0xf7, 0x26, 0x0d, 0xa7, 0xa1, + 0xb9, 0x36, 0x05, 0x3c, 0x3c, 0xf2, 0x80, 0x4c, 0x3b, 0xe3, 0x8f, 0x51, 0x69, 0x40, 0xa5, 0xd3, + 0xd5, 0x5c, 0x45, 0xc5, 0x55, 0x8b, 0x42, 0xb3, 0xb4, 0x37, 0x86, 0x4f, 0x43, 0xf3, 0xda, 0xc4, + 0x51, 0xf1, 0x4c, 0xba, 0xe1, 0x6f, 0xd1, 0x5a, 0x5c, 0x77, 0xe1, 0x51, 0x07, 0x0e, 0xa0, 0x0f, + 0x8e, 0xe4, 0x7e, 0x39, 0xa7, 0x8a, 0xfe, 0xd6, 0x44, 0x09, 0xd2, 0x3f, 0x6f, 0x79, 0xbd, 0x4e, + 0x0c, 0x08, 0x2b, 0x6e, 0x30, 0x6b, 0xb8, 0x61, 0xed, 0xd2, 0x26, 0xf4, 0x47, 0xae, 0x8d, 0x57, + 0xa2, 0xd0, 0x5c, 0xbb, 0x3f, 0xcb, 0x48, 0xe6, 0x83, 0x60, 0x8e, 0xae, 0xf2, 0xe6, 0x37, 0xe0, + 0xc8, 0x34, 0x6c, 0xe9, 0xe2, 0x61, 0x71, 0x14, 0x9a, 0x57, 0x1f, 0x4c, 0xd1, 0x91, 0x19, 0xfa, + 0xb8, 0x60, 0x82, 0xb5, 0xe0, 0x93, 0x76, 0x1b, 0x1c, 0x29, 0xca, 0x57, 0xc6, 0x05, 0x3b, 0x18, + 0xc3, 0x71, 0xc1, 0xc6, 0xc7, 0xed, 0x3e, 0x15, 0x82, 0x4c, 0xba, 0xe1, 0xbb, 0xe8, 0x6a, 0xdc, + 0xf5, 0x3c, 0x90, 0x07, 0xe0, 0x70, 0xb7, 0x25, 0xca, 0xf9, 0xaa, 0x51, 0xcf, 0x25, 0x2f, 0x78, + 0x38, 0x65, 0x21, 0x33, 0x37, 0xf1, 0x23, 0x74, 0x33, 0x6d, 0x25, 0x02, 0x43, 0x06, 0x87, 0x8f, + 0xc1, 0x8f, 0x0f, 0xa2, 0x5c, 0xa8, 0x66, 0xea, 0xc5, 0xc6, 0x6b, 0x51, 0x68, 0xde, 0xdc, 0x3a, + 0xfb, 0x0a, 0x39, 0xcf, 0x17, 0x3f, 0x45, 0xd8, 0x07, 0xe6, 0x0e, 0xb9, 0xa3, 0xda, 0x4f, 0x37, + 0x04, 0x52, 0xf9, 0xdd, 0x89, 0x42, 0x13, 0x93, 0x39, 0xeb, 0x69, 0x68, 0xde, 0x98, 0x47, 0x55, + 0x7b, 0x9c, 0xc1, 0x55, 0xfb, 0xd5, 0x40, 0xb7, 0x66, 0x66, 0x39, 0x19, 0x9b, 0x20, 0xe9, 0x78, + 0xfc, 0x14, 0x15, 0xe2, 0x1f, 0xd3, 0xa2, 0x92, 0xaa, 0xe1, 0x2e, 0x6d, 0xde, 0x59, 0xec, 0x37, + 0x26, 0xff, 0x6c, 0x0f, 0x24, 0x6d, 0x60, 0x3d, 0x34, 0x68, 0x8c, 0x91, 0x94, 0x15, 0x7f, 0x89, + 0x0a, 0x3a, 0xb2, 0x28, 0x2f, 0xab, 0x11, 0x7d, 0x6f, 0xf1, 0x11, 0x9d, 0x79, 0x7b, 0x23, 0x1b, + 0x87, 0x22, 0x85, 0x43, 0x4d, 0x58, 0xfb, 0xd3, 0x40, 0xd5, 0x17, 0xe5, 0xb7, 0xcb, 0x84, 0xc4, + 0x5f, 0xcd, 0xe5, 0x68, 0x2d, 0xd8, 0xaa, 0x4c, 0x24, 0x19, 0x5e, 0xd7, 0x19, 0x16, 0x46, 0xc8, + 0x44, 0x7e, 0x3d, 0x94, 0x63, 0x12, 0x06, 0xa3, 0xe4, 0xee, 0x5d, 0x38, 0xb9, 0xa9, 0x87, 0x8f, + 0x37, 0xd1, 0x4e, 0x4c, 0x4e, 0x92, 0x18, 0xb5, 0x9f, 0x0d, 0x94, 0x8d, 0x57, 0x13, 0x7e, 0x03, + 0x15, 0xa9, 0xc7, 0x3e, 0xf5, 0x79, 0xe0, 0x89, 0xb2, 0xa1, 0x7a, 0x70, 0x35, 0x0a, 0xcd, 0xe2, + 0xd6, 0xfe, 0x4e, 0x02, 0x92, 0xb1, 0x1d, 0x6f, 0xa0, 0x12, 0xf5, 0x58, 0xda, 0xb2, 0xcb, 0xea, + 0xfa, 0xb5, 0x78, 0x80, 0xb6, 0xf6, 0x77, 0xd2, 0x36, 0x9d, 0xbc, 0x13, 0xf3, 0xfb, 0x20, 0x78, + 0xe0, 0x3b, 0x7a, 0xb3, 0x6a, 0x7e, 0x32, 0x02, 0xc9, 0xd8, 0x8e, 0xdf, 0x44, 0x39, 0xe1, 0x70, + 0x0f, 0xf4, 0x5e, 0xbc, 0x11, 0x3f, 0xfb, 0x20, 0x06, 0x4e, 0x43, 0xb3, 0xa8, 0x3e, 0x54, 0x83, + 0x26, 0x97, 0x6a, 0x3f, 0x1a, 0x08, 0xcf, 0xaf, 0x5e, 0xfc, 0x11, 0x42, 0x3c, 0x3d, 0xe9, 0x94, + 0x4c, 0xd5, 0x55, 0x29, 0x7a, 0x1a, 0x9a, 0xab, 0xe9, 0x49, 0x51, 0x4e, 0xb8, 0xe0, 0x7d, 0x94, + 0x8d, 0xd7, 0xb5, 0x56, 0x1e, 0xeb, 0xdf, 0xe9, 0xc0, 0x58, 0xd3, 0xe2, 0x13, 0x51, 0x4c, 0xb5, + 0x1f, 0x0c, 0x74, 0xfd, 0x00, 0xfc, 0x21, 0x73, 0x80, 0x40, 0x1b, 0x7c, 0x70, 0x1d, 0xc0, 0x36, + 0x2a, 0xa6, 0x3b, 0x51, 0xeb, 0xe1, 0x9a, 0xf6, 0x2d, 0xa6, 0xfb, 0x93, 0x8c, 0xef, 0xa4, 0xda, + 0xb9, 0x7c, 0xae, 0x76, 0xde, 0x42, 0x59, 0x8f, 0xca, 0x6e, 0x39, 0xa3, 0x6e, 0x14, 0x62, 0xeb, + 0x3e, 0x95, 0x5d, 0xa2, 0x50, 0x65, 0xe5, 0xbe, 0x54, 0xc5, 0xcd, 0x69, 0x2b, 0xf7, 
0x25, 0x51, + 0x68, 0xed, 0x8f, 0x2b, 0x68, 0xed, 0x31, 0xed, 0xb3, 0xd6, 0xa5, 0x5e, 0x5f, 0xea, 0xf5, 0x82, + 0x7a, 0x8d, 0x2e, 0xf5, 0xfa, 0x22, 0x7a, 0x5d, 0x3b, 0x31, 0x50, 0x65, 0x6e, 0xd6, 0x5e, 0xb6, + 0x9e, 0x7e, 0x3d, 0xa7, 0xa7, 0xef, 0x2f, 0x3e, 0x42, 0x73, 0xaf, 0x9f, 0x53, 0xd4, 0xbf, 0x0c, + 0x54, 0x7b, 0x71, 0x8e, 0x2f, 0x41, 0x53, 0x07, 0xd3, 0x9a, 0xfa, 0xd9, 0x7f, 0x48, 0x70, 0x11, + 0x55, 0xfd, 0xc9, 0x40, 0xff, 0x3b, 0x63, 0x9d, 0xe1, 0x57, 0x51, 0x26, 0xf0, 0xfb, 0x7a, 0x2d, + 0xe7, 0xa3, 0xd0, 0xcc, 0x3c, 0x22, 0xbb, 0x24, 0xc6, 0x30, 0x45, 0x79, 0x91, 0x28, 0x83, 0x4e, + 0xff, 0xee, 0xe2, 0x6f, 0x9c, 0x95, 0x94, 0x46, 0x29, 0x0a, 0xcd, 0xfc, 0x08, 0x1d, 0xf1, 0xe2, + 0x3a, 0x2a, 0x38, 0xb4, 0x11, 0xb8, 0x2d, 0xad, 0x69, 0x2b, 0x8d, 0x95, 0xb8, 0x5c, 0xdb, 0x5b, + 0x09, 0x46, 0x52, 0x6b, 0xe3, 0xf6, 0xf1, 0x49, 0x65, 0xe9, 0xd9, 0x49, 0x65, 0xe9, 0xf9, 0x49, + 0x65, 0xe9, 0xbb, 0xa8, 0x62, 0x1c, 0x47, 0x15, 0xe3, 0x59, 0x54, 0x31, 0x9e, 0x47, 0x15, 0xe3, + 0xb7, 0xa8, 0x62, 0x7c, 0xff, 0x7b, 0x65, 0xe9, 0x8b, 0xbc, 0x8e, 0xff, 0x77, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xae, 0x27, 0x2b, 0xcb, 0x2c, 0x0f, 0x00, 0x00, +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } } - i += n1 - if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n2 + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -201,62 +638,56 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { } func (m *Rule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a } } if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -264,40 +695,41 @@ func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { } func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n3, err := m.Rule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i += n3 - return i, nil + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -305,171 +737,249 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } if m.Path != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 } } - return i, nil -} - -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := 
m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Webhook) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Rules) > 0 { - for _, msg 
:= range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n7 - } - if m.SideEffects != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i += copy(dAtA[i:], *m.SideEffects) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -477,63 +987,111 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n8, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a } if m.CABundle != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + i-- + dAtA[i] = 0x12 } - if m.URL != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base +} 
+func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } + func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -548,6 +1106,9 @@ func (m *MutatingWebhookConfiguration) Size() (n int) { } func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -562,6 +1123,9 @@ func (m *MutatingWebhookConfigurationList) Size() (n int) { } func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.APIGroups) > 0 { @@ -582,10 +1146,17 @@ func (m *Rule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Operations) > 0 { @@ -600,6 +1171,9 @@ func (m *RuleWithOperations) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -610,30 +1184,70 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } return n } -func (m *ValidatingWebhookConfiguration) Size() (n int) { +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.ObjectMeta.Size() + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector 
!= nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.ListMeta.Size() + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -641,35 +1255,27 @@ func (m *ValidatingWebhookConfigurationList) Size() (n int) { return n } -func (m *Webhook) Size() (n int) { +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { + if len(m.Items) > 0 { + for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Service != nil { @@ -688,25 +1294,48 @@ func (m *WebhookClientConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} func (this *MutatingWebhookConfiguration) String() string { if this == nil { return "nil" } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" s := 
strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, `}`, }, "") return s @@ -715,9 +1344,14 @@ func (this *MutatingWebhookConfigurationList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -730,6 +1364,7 @@ func (this *Rule) String() string { `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, `}`, }, "") return s @@ -753,6 +1388,31 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -761,9 +1421,14 @@ func (this *ValidatingWebhookConfiguration) String() string { if this == nil { return "nil" } + 
repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, `}`, }, "") return s @@ -772,34 +1437,24 @@ func (this *ValidatingWebhookConfigurationList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *Webhook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Webhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookClientConfig) String() string { +func (this *WebhookClientConfig) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `URL:` + valueToStringGenerated(this.URL) + `,`, `}`, @@ -814,7 +1469,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -829,7 +1484,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -837,17 +1492,729 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -857,27 +2224,29 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -887,22 +2256,24 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return 
ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, Webhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s iNdEx = postIndex default: iNdEx = preIndex @@ -913,6 +2284,9 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -925,7 +2299,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -940,7 +2314,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -948,17 +2322,17 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -968,25 +2342,27 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -998,7 +2374,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1007,11 +2383,13 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) - if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1024,6 +2402,9 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1036,7 +2417,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Rule) Unmarshal(dAtA []byte) error { +func (m *ServiceReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1051,7 +2432,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1059,15 +2440,15 @@ func (m *Rule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1079,7 +2460,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1089,14 +2470,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1108,7 +2492,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1118,14 +2502,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1137,7 +2524,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1147,11 +2534,35 @@ func (m *Rule) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1161,6 +2572,9 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1173,7 +2587,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1188,7 +2602,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1196,15 +2610,15 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1216,7 +2630,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1226,14 +2640,17 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1245,7 +2662,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1254,66 +2671,53 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - 
if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1325,7 +2729,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1335,14 +2739,54 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1354,7 +2798,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1364,14 +2808,38 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1383,7 +2851,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1393,67 +2861,19 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1463,25 +2883,28 @@ func (m 
*ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s iNdEx = postIndex - case 2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1493,7 +2916,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1502,11 +2925,16 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, Webhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1519,6 +2947,9 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1531,7 +2962,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1546,7 +2977,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1554,15 +2985,15 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1574,7 +3005,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -1583,16 +3014,19 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1604,7 +3038,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1613,11 +3047,14 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1630,6 +3067,9 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1642,7 +3082,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Webhook) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1657,7 +3097,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1665,44 +3105,15 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1714,7 +3125,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1723,77 +3134,19 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1805,7 +3158,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1814,45 +3167,16 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := 
SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -1863,6 +3187,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1890,7 +3217,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1918,7 +3245,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1927,6 +3254,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1951,7 +3281,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1960,6 +3290,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1982,7 +3315,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1992,6 +3325,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2007,6 +3343,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2073,10 +3412,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2105,6 +3447,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2123,68 +3468,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 906 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0x37, 0x29, 0x49, 0x26, 0x89, 0x76, 0x3b, 0x80, 0x14, 0xaa, 0x95, 0x1d, 0xe5, 0x80, - 0x22, 0xa1, 0xb5, 0x49, 0x41, 0x08, 0x21, 0x10, 0xaa, 0x0b, 0x0b, 0x95, 0xba, 0xbb, 0x61, 0x0a, - 0xbb, 
0x12, 0xe2, 0xc0, 0xc4, 0x79, 0x49, 0x86, 0xf8, 0x97, 0x66, 0xc6, 0x59, 0x7a, 0x43, 0xe2, - 0x1f, 0x40, 0x42, 0xfc, 0x0d, 0xfc, 0x15, 0xdc, 0x7b, 0xdc, 0x0b, 0x62, 0x4f, 0x16, 0x35, 0x67, - 0x0e, 0x5c, 0x7b, 0x42, 0x63, 0x3b, 0x71, 0xd2, 0x6c, 0xbb, 0xe9, 0x85, 0x03, 0x37, 0xcf, 0xf7, - 0xe6, 0xfb, 0xde, 0xfb, 0x9e, 0xdf, 0x1b, 0xf4, 0xc5, 0xec, 0x7d, 0x61, 0xb2, 0xc0, 0x9a, 0x45, - 0x43, 0xe0, 0x3e, 0x48, 0x10, 0xd6, 0x1c, 0xfc, 0x51, 0xc0, 0xad, 0x3c, 0x40, 0x43, 0x66, 0xd1, - 0x91, 0xc7, 0x84, 0x60, 0x81, 0xcf, 0x61, 0xc2, 0x84, 0xe4, 0x54, 0xb2, 0xc0, 0xb7, 0xe6, 0xfd, - 0x21, 0x48, 0xda, 0xb7, 0x26, 0xe0, 0x03, 0xa7, 0x12, 0x46, 0x66, 0xc8, 0x03, 0x19, 0xe0, 0x5e, - 0xc6, 0x34, 0x69, 0xc8, 0xcc, 0x17, 0x32, 0xcd, 0x9c, 0xb9, 0x77, 0x6f, 0xc2, 0xe4, 0x34, 0x1a, - 0x9a, 0x4e, 0xe0, 0x59, 0x93, 0x60, 0x12, 0x58, 0xa9, 0xc0, 0x30, 0x1a, 0xa7, 0xa7, 0xf4, 0x90, - 0x7e, 0x65, 0xc2, 0x7b, 0xef, 0x16, 0x25, 0x79, 0xd4, 0x99, 0x32, 0x1f, 0xf8, 0xa9, 0x15, 0xce, - 0x26, 0x0a, 0x10, 0x96, 0x07, 0x92, 0x5a, 0xf3, 0x8d, 0x72, 0xf6, 0xac, 0xab, 0x58, 0x3c, 0xf2, - 0x25, 0xf3, 0x60, 0x83, 0xf0, 0xde, 0xcb, 0x08, 0xc2, 0x99, 0x82, 0x47, 0x2f, 0xf3, 0xba, 0xbf, - 0x6b, 0xe8, 0xee, 0x83, 0x48, 0x52, 0xc9, 0xfc, 0xc9, 0x13, 0x18, 0x4e, 0x83, 0x60, 0x76, 0x18, - 0xf8, 0x63, 0x36, 0x89, 0x32, 0xdb, 0xf8, 0x5b, 0x54, 0x53, 0x45, 0x8e, 0xa8, 0xa4, 0x6d, 0xad, - 0xa3, 0xf5, 0x1a, 0xfb, 0x6f, 0x9b, 0x45, 0xaf, 0x96, 0xb9, 0xcc, 0x70, 0x36, 0x51, 0x80, 0x30, - 0xd5, 0x6d, 0x73, 0xde, 0x37, 0x1f, 0x0d, 0xbf, 0x03, 0x47, 0x3e, 0x00, 0x49, 0x6d, 0x7c, 0x16, - 0x1b, 0xa5, 0x24, 0x36, 0x50, 0x81, 0x91, 0xa5, 0x2a, 0x3e, 0x41, 0xb5, 0x3c, 0xb3, 0x68, 0xdf, - 0xea, 0x94, 0x7b, 0x8d, 0xfd, 0xbe, 0xb9, 0xed, 0xdf, 0x30, 0x73, 0xa6, 0x5d, 0x51, 0x29, 0x48, - 0xed, 0x69, 0x2e, 0xd4, 0xfd, 0x5b, 0x43, 0x9d, 0xeb, 0x7c, 0x1d, 0x33, 0x21, 0xf1, 0x37, 0x1b, - 0xde, 0xcc, 0xed, 0xbc, 0x29, 0x76, 0xea, 0xec, 0x4e, 0xee, 0xac, 0xb6, 0x40, 0x56, 0x7c, 0xcd, - 0xd0, 0x0e, 0x93, 0xe0, 0x2d, 0x4c, 0xdd, 0xdf, 0xde, 0xd4, 0x75, 0x85, 0xdb, 0xad, 0x3c, 0xe5, - 0xce, 0x91, 0x12, 0x27, 0x59, 0x8e, 0xee, 0xcf, 0x1a, 0xaa, 0x90, 0xc8, 0x05, 0xfc, 0x16, 0xaa, - 0xd3, 0x90, 0x7d, 0xc6, 0x83, 0x28, 0x14, 0x6d, 0xad, 0x53, 0xee, 0xd5, 0xed, 0x56, 0x12, 0x1b, - 0xf5, 0x83, 0xc1, 0x51, 0x06, 0x92, 0x22, 0x8e, 0xfb, 0xa8, 0x41, 0x43, 0xf6, 0x18, 0xb8, 0x2a, - 0x25, 0x2b, 0xb4, 0x6e, 0xdf, 0x4e, 0x62, 0xa3, 0x71, 0x30, 0x38, 0x5a, 0xc0, 0x64, 0xf5, 0x8e, - 0xd2, 0xe7, 0x20, 0x82, 0x88, 0x3b, 0x20, 0xda, 0xe5, 0x42, 0x9f, 0x2c, 0x40, 0x52, 0xc4, 0xbb, - 0xbf, 0x6a, 0x08, 0xab, 0xaa, 0x9e, 0x30, 0x39, 0x7d, 0x14, 0x42, 0xe6, 0x40, 0xe0, 0x8f, 0x11, - 0x0a, 0x96, 0xa7, 0xbc, 0x48, 0x23, 0x9d, 0x8f, 0x25, 0x7a, 0x11, 0x1b, 0xad, 0xe5, 0xe9, 0xcb, - 0xd3, 0x10, 0xc8, 0x0a, 0x05, 0x0f, 0x50, 0x85, 0x47, 0x2e, 0xb4, 0x6f, 0x6d, 0xfc, 0xb4, 0x97, - 0x74, 0x56, 0x15, 0x63, 0x37, 0xf3, 0x0e, 0xa6, 0x0d, 0x23, 0xa9, 0x52, 0xf7, 0x47, 0x0d, 0xdd, - 0x39, 0x01, 0x3e, 0x67, 0x0e, 0x10, 0x18, 0x03, 0x07, 0xdf, 0x01, 0x6c, 0xa1, 0xba, 0x4f, 0x3d, - 0x10, 0x21, 0x75, 0x20, 0x1d, 0x90, 0xba, 0xbd, 0x9b, 0x73, 0xeb, 0x0f, 0x17, 0x01, 0x52, 0xdc, - 0xc1, 0x1d, 0x54, 0x51, 0x87, 0xb4, 0xae, 0x7a, 0x91, 0x47, 0xdd, 0x25, 0x69, 0x04, 0xdf, 0x45, - 0x95, 0x90, 0xca, 0x69, 0xbb, 0x9c, 0xde, 0xa8, 0xa9, 0xe8, 0x80, 0xca, 0x29, 0x49, 0xd1, 0xee, - 0x1f, 0x1a, 0xd2, 0x1f, 0x53, 0x97, 0x8d, 0xfe, 0x77, 0xfb, 0xf8, 0x8f, 0x86, 0xba, 0xd7, 0x3b, - 0xfb, 0x0f, 0x36, 0xd2, 0x5b, 0xdf, 0xc8, 0xcf, 0xb7, 0xb7, 0x75, 0x7d, 0xe9, 0x57, 0xec, 0xe4, - 0x2f, 0x15, 0x54, 0xcd, 0xaf, 
0x2f, 0x27, 0x43, 0xbb, 0x72, 0x32, 0x9e, 0xa2, 0xa6, 0xe3, 0x32, - 0xf0, 0x65, 0x26, 0x9d, 0xcf, 0xf6, 0x47, 0x37, 0x6e, 0xfd, 0xe1, 0x8a, 0x88, 0xfd, 0x5a, 0x9e, - 0xa8, 0xb9, 0x8a, 0x92, 0xb5, 0x44, 0x98, 0xa2, 0x1d, 0xb5, 0x02, 0xd9, 0x36, 0x37, 0xf6, 0x3f, - 0xbc, 0xd9, 0x36, 0xad, 0xaf, 0x76, 0xd1, 0x09, 0x15, 0x13, 0x24, 0x53, 0xc6, 0xc7, 0xa8, 0x35, - 0xa6, 0xcc, 0x8d, 0x38, 0x0c, 0x02, 0x97, 0x39, 0xa7, 0xed, 0x4a, 0xda, 0x86, 0x37, 0x93, 0xd8, - 0x68, 0xdd, 0x5f, 0x0d, 0x5c, 0xc4, 0xc6, 0xee, 0x1a, 0x90, 0xae, 0xfe, 0x3a, 0x19, 0x7f, 0x8f, - 0x76, 0x97, 0x2b, 0x77, 0x02, 0x2e, 0x38, 0x32, 0xe0, 0xed, 0x9d, 0xb4, 0x5d, 0xef, 0x6c, 0x39, - 0x2d, 0x74, 0x08, 0xee, 0x82, 0x6a, 0xbf, 0x9e, 0xc4, 0xc6, 0xee, 0xc3, 0xcb, 0x8a, 0x64, 0x33, - 0x09, 0xfe, 0x04, 0x35, 0x04, 0x1b, 0xc1, 0xa7, 0xe3, 0x31, 0x38, 0x52, 0xb4, 0x5f, 0x49, 0x5d, - 0x74, 0xd5, 0x7b, 0x79, 0x52, 0xc0, 0x17, 0xb1, 0x71, 0xbb, 0x38, 0x1e, 0xba, 0x54, 0x08, 0xb2, - 0x4a, 0xeb, 0xfe, 0xa6, 0xa1, 0x57, 0x5f, 0xf0, 0xb3, 0x30, 0x45, 0x55, 0x91, 0x3d, 0x41, 0xf9, - 0xec, 0x7f, 0xb0, 0xfd, 0xaf, 0xb8, 0xfc, 0x76, 0xd9, 0x8d, 0x24, 0x36, 0xaa, 0x0b, 0x74, 0xa1, - 0x8b, 0x7b, 0xa8, 0xe6, 0x50, 0x3b, 0xf2, 0x47, 0xf9, 0xe3, 0xd9, 0xb4, 0x9b, 0x6a, 0x57, 0x0e, - 0x0f, 0x32, 0x8c, 0x2c, 0xa3, 0xf8, 0x0d, 0x54, 0x8e, 0xb8, 0x9b, 0xbf, 0x53, 0xd5, 0x24, 0x36, - 0xca, 0x5f, 0x91, 0x63, 0xa2, 0x30, 0xfb, 0xde, 0xd9, 0xb9, 0x5e, 0x7a, 0x76, 0xae, 0x97, 0x9e, - 0x9f, 0xeb, 0xa5, 0x1f, 0x12, 0x5d, 0x3b, 0x4b, 0x74, 0xed, 0x59, 0xa2, 0x6b, 0xcf, 0x13, 0x5d, - 0xfb, 0x33, 0xd1, 0xb5, 0x9f, 0xfe, 0xd2, 0x4b, 0x5f, 0x57, 0xf3, 0xd2, 0xfe, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x85, 0x06, 0x8c, 0x7f, 0xae, 0x09, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 4d55ca87..086cbcc7 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -28,9 +28,160 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +message MutatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. 
+ // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + + // SideEffects states whether this webhook has side effects. 
+ // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + repeated string admissionReviewVersions = 8; + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + optional string reinvocationPolicy = 10; +} + // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. message MutatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -38,13 +189,13 @@ message MutatingWebhookConfiguration { // +optional // +patchMergeKey=name // +patchStrategy=merge - repeated Webhook Webhooks = 2; + repeated MutatingWebhook Webhooks = 2; } // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. message MutatingWebhookConfigurationList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -66,7 +217,7 @@ message Rule { repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' means pods. // 'pods/log' means the log subresource of pods. @@ -74,13 +225,25 @@ message Rule { // 'pods/*' means all subresources of pods. // '*/scale' means all scale subresources. // '*/*' means all resources and their subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + optional string scope = 4; } // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make @@ -111,34 +274,16 @@ message ServiceReference { // this service. // +optional optional string path = 3; -} -// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. -message ValidatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Webhooks is a list of webhooks and the affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated Webhook Webhooks = 2; -} - -// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. -message ValidatingWebhookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ValidatingWebhookConfiguration. - repeated ValidatingWebhookConfiguration items = 2; + optional int32 port = 4; } -// Webhook describes an admission webhook and the resources and operations it applies to. -message Webhook { +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +message ValidatingWebhook { // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -163,12 +308,29 @@ message Webhook { // +optional optional string failurePolicy = 4; + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". 
+ // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + optional string matchPolicy = 9; + // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. If the // object itself is a namespace, the matching is performed on // object.metadata.labels. If the object is another cluster scoped resource, // it never skips the webhook. - // + // // For example, to run the webhook on any objects whose namespace is not // associated with "runlevel" of "0" or "1"; you will set the selector as // follows: @@ -184,7 +346,7 @@ message Webhook { // } // ] // } - // + // // If instead you want to only run the webhook on any objects whose // namespace is associated with the "environment" of "prod" or "staging"; // you will set the selector as follows: @@ -200,16 +362,30 @@ message Webhook { // } // ] // } - // + // // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // for more examples of label selectors. - // + // // Default to the empty LabelSelector, which matches everything. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; - // SideEffects states whether this webhookk has side effects. + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + + // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. @@ -217,53 +393,95 @@ message Webhook { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. 
After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + repeated string admissionReviewVersions = 8; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ValidatingWebhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; } // WebhookClientConfig contains the information to make a TLS // connection with the webhook message WebhookClientConfig { // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. - // + // // The `host` should not refer to a service running in the cluster; use // the `service` field instead. The host might be resolved via external // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve // in-cluster DNS as that would be a layering violation). `host` may // also be an IP address. - // + // // Please note that using `localhost` or `127.0.0.1` as a `host` is // risky unless you take great care to run this webhook on all hosts // which run an apiserver which might need to make calls to this // webhook. Such installs are likely to be non-portable, i.e., not easy // to turn up in a new cluster. - // + // // The scheme must be "https"; the URL must begin with "https://". - // + // // A path is optional, and if present may be any string permissible in // a URL. You may use the path to pass an arbitrary string to the // webhook, for example, a cluster identifier. - // + // // Attempting to use a user or basic auth e.g. "user:password@" is not // allowed. Fragments ("#...") and query parameters ("?...") are not // allowed, either. 
- // + // // +optional optional string url = 3; // `service` is a reference to the service for this webhook. Either // `service` or `url` must be specified. - // + // // If the webhook is running within the cluster, then you should use `service`. - // - // Port 443 will be used if it is open, otherwise it is an error. - // + // // +optional optional ServiceReference service = 1; - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional optional bytes caBundle = 2; } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 0b948ba1..37a993e3 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -49,8 +49,32 @@ type Rule struct { // Depending on the enclosing object, subresources might not be allowed. // Required. Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` } +type ScopeType string + +const ( + // ClusterScope means that scope is limited to cluster-scoped objects. + // Namespace objects are cluster-scoped. + ClusterScope ScopeType = "Cluster" + // NamespacedScope means that scope is limited to namespaced objects. + NamespacedScope ScopeType = "Namespaced" + // AllScopes means that all scopes are included. + AllScopes ScopeType = "*" +) + type FailurePolicyType string const ( @@ -60,6 +84,16 @@ const ( Fail FailurePolicyType = "Fail" ) +// MatchPolicyType specifies the type of match policy +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + type SideEffectClass string const ( @@ -81,16 +115,17 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. type ValidatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
// +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -99,7 +134,7 @@ type ValidatingWebhookConfiguration struct { type ValidatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingWebhookConfiguration. @@ -111,16 +146,17 @@ type ValidatingWebhookConfigurationList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. type MutatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -129,15 +165,15 @@ type MutatingWebhookConfiguration struct { type MutatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of MutatingWebhookConfiguration. Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` } -// Webhook describes an admission webhook and the resources and operations it applies to. -type Webhook struct { +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +type ValidatingWebhook struct { // The name of the admission webhook. 
// Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -162,6 +198,155 @@ type Webhook struct { // +optional FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. 
+ // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` +} + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +type MutatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. 
+ // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. If the // object itself is a namespace, the matching is performed on @@ -208,7 +393,21 @@ type Webhook struct { // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` - // SideEffects states whether this webhookk has side effects. + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. @@ -216,8 +415,58 @@ type Webhook struct { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. 
If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` } +// ReinvocationPolicyType specifies what type of policy the admission hook uses. +type ReinvocationPolicyType string + +const ( + // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a + // single admission evaluation. + NeverReinvocationPolicy ReinvocationPolicyType = "Never" + // IfNeededReinvocationPolicy indicates that the webhook may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial webhook call. + IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" +) + // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make // sure that all the tuple expansions are valid. type RuleWithOperations struct { @@ -246,7 +495,7 @@ const ( // connection with the webhook type WebhookClientConfig struct { // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. // // The `host` should not refer to a service running in the cluster; use @@ -279,15 +528,13 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // - // Port 443 will be used if it is open, otherwise it is an error. - // // +optional - Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. 
- CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` } // ServiceReference holds a reference to Service.legacy.k8s.io @@ -303,4 +550,10 @@ type ServiceReference struct { // this service. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index aab917a4..d9fb5af8 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -27,9 +27,28 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhook = map[string]string{ + "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. 
Default to `['v1beta1']`.", + "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", +} + +func (MutatingWebhook) SwaggerDoc() map[string]string { + return map_MutatingWebhook +} + var map_MutatingWebhookConfiguration = map[string]string{ - "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -39,7 +58,7 @@ func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_MutatingWebhookConfigurationList = map[string]string{ "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of MutatingWebhookConfiguration.", } @@ -52,6 +71,7 @@ var map_Rule = map[string]string{ "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "scope": "scope specifies the scope of this rule. 
Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", } func (Rule) SwaggerDoc() map[string]string { @@ -72,15 +92,34 @@ var map_ServiceReference = map[string]string{ "namespace": "`namespace` is the namespace of the service. Required", "name": "`name` is the name of the service. Required", "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", } func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } +var map_ValidatingWebhook = map[string]string{ + "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", +} + +func (ValidatingWebhook) SwaggerDoc() map[string]string { + return map_ValidatingWebhook +} + var map_ValidatingWebhookConfiguration = map[string]string{ - "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "": "ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it. Deprecated in v1.16, planned for removal in v1.19. 
Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -90,7 +129,7 @@ func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_ValidatingWebhookConfigurationList = map[string]string{ "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ValidatingWebhookConfiguration.", } @@ -98,25 +137,11 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_ValidatingWebhookConfigurationList } -var map_Webhook = map[string]string{ - "": "Webhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. 
Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", -} - -func (Webhook) SwaggerDoc() map[string]string { - return map_Webhook -} - var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", - "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", - "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. 
Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", } func (WebhookClientConfig) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index c6867be1..c4570d03 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,70 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(ReinvocationPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. +func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { + if in == nil { + return nil + } + out := new(MutatingWebhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { *out = *in @@ -32,7 +96,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfigu in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) + *out = make([]MutatingWebhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -62,7 +126,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]MutatingWebhookConfiguration, len(*in)) @@ -109,6 +173,11 @@ func (in *Rule) DeepCopyInto(out *Rule) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeType) + **out = **in + } return } @@ -152,6 +221,11 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } return } @@ -165,6 +239,65 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. +func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { + if in == nil { + return nil + } + out := new(ValidatingWebhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { *out = *in @@ -172,7 +305,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookCon in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) + *out = make([]ValidatingWebhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -202,7 +335,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ValidatingWebhookConfiguration, len(*in)) @@ -231,45 +364,6 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Webhook) DeepCopyInto(out *Webhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. -func (in *Webhook) DeepCopy() *Webhook { - if in == nil { - return nil - } - out := new(Webhook) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 1d66c222..61dc97bd 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index eac6ef2a..425144d8 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -14,61 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -81,123 +48,789 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + 
xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{3} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func 
(*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + 
xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() 
{} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func 
(*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() 
+} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_e1014cab6f31e43b, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_e1014cab6f31e43b, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m 
*RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{22} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{23} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{24} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{25} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func 
(m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{26} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) } -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } - -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } - -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } - -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptor_e1014cab6f31e43b, []int{27} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") @@ -229,10 +862,146 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", 
fileDescriptor_e1014cab6f31e43b) +} + +var fileDescriptor_e1014cab6f31e43b = []byte{ + // 2031 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, + 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, + 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, + 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0xcd, 0x53, 0xf5, 0x7e, 0xaf, 0x7f, 0x55, + 0xf5, 0xab, 0x7a, 0xaf, 0xab, 0x0d, 0xee, 0x1d, 0x7f, 0xdb, 0x53, 0x75, 0xbb, 0x7d, 0xec, 0x1f, + 0x12, 0xd7, 0x22, 0x94, 0x78, 0xed, 0x21, 0xb1, 0x34, 0xdb, 0x6d, 0x8b, 0x0e, 0xec, 0xe8, 0x6d, + 0xec, 0x38, 0x5e, 0x7b, 0x78, 0xbb, 0x3d, 0x20, 0x16, 0x71, 0x31, 0x25, 0x9a, 0xea, 0xb8, 0x36, + 0xb5, 0x21, 0x0c, 0x30, 0x2a, 0x76, 0x74, 0x95, 0x61, 0xd4, 0xe1, 0xed, 0xab, 0xb7, 0x06, 0x3a, + 0x3d, 0xf2, 0x0f, 0xd5, 0xbe, 0x6d, 0xb6, 0x07, 0xf6, 0xc0, 0x6e, 0x73, 0xe8, 0xa1, 0xff, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0xe2, 0x31, 0x7d, 0xdb, 0x25, 0x92, 0xc7, + 0x5c, 0xbd, 0x1b, 0x63, 0x4c, 0xdc, 0x3f, 0xd2, 0x2d, 0xe2, 0x8e, 0xda, 0xce, 0xf1, 0x80, 0x35, + 0x78, 0x6d, 0x93, 0x50, 0x2c, 0x8b, 0x6a, 0x17, 0x45, 0xb9, 0xbe, 0x45, 0x75, 0x93, 0xe4, 0x02, + 0x5e, 0x9b, 0x14, 0xe0, 0xf5, 0x8f, 0x88, 0x89, 0x73, 0x71, 0xaf, 0x14, 0xc5, 0xf9, 0x54, 0x37, + 0xda, 0xba, 0x45, 0x3d, 0xea, 0x66, 0x83, 0x5a, 0xff, 0x51, 0x00, 0xec, 0xda, 0x16, 0x75, 0x6d, + 0xc3, 0x20, 0x2e, 0x22, 0x43, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x14, 0xd4, 0xd8, 0x78, 0x34, 0x4c, + 0x71, 0x5d, 0xb9, 0xae, 0x6c, 0x2c, 0xdc, 0xf9, 0x96, 0x1a, 0x4f, 0x72, 0x44, 0xaf, 0x3a, 0xc7, + 0x03, 0xd6, 0xe0, 0xa9, 0x0c, 0xad, 0x0e, 0x6f, 0xab, 0xbb, 0x87, 0xef, 0x92, 0x3e, 0xed, 0x11, + 0x8a, 0x3b, 0xf0, 0xc9, 0x49, 0x73, 0x66, 0x7c, 0xd2, 0x04, 0x71, 0x1b, 0x8a, 0x58, 0xe1, 0x2e, + 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x55, 0xc8, 0x2e, 0x06, 0xad, 0x22, 0xfc, 0xb3, 0x37, 0x1e, + 0x53, 0x62, 0xb1, 0xf4, 0x3a, 0x97, 0x04, 0x75, 0x65, 0x0b, 0x53, 0x8c, 0x38, 0x11, 0x7c, 0x19, + 0xd4, 0x5c, 0x91, 0x7e, 0xbd, 0x7c, 0x5d, 0xd9, 0x28, 0x77, 0x2e, 0x0b, 0x54, 0x2d, 0x1c, 0x16, + 0x8a, 0x10, 0xad, 0xbf, 0x2a, 0x60, 0x2d, 0x3f, 0xee, 0x6d, 0xdd, 0xa3, 0xf0, 0x9d, 0xdc, 0xd8, + 0xd5, 0xe9, 0xc6, 0xce, 0xa2, 0xf9, 0xc8, 0xa3, 0x07, 0x87, 0x2d, 0x89, 0x71, 0xbf, 0x0d, 0xaa, + 0x3a, 0x25, 0xa6, 0x57, 0x2f, 0x5d, 0x2f, 0x6f, 0x2c, 0xdc, 0xb9, 0xa1, 0xe6, 0x6b, 0x57, 0xcd, + 0x27, 0xd6, 0x59, 0x14, 0x94, 0xd5, 0xb7, 0x58, 0x30, 0x0a, 0x38, 0x5a, 0xff, 0x55, 0xc0, 0xfc, + 0x16, 0x26, 0xa6, 0x6d, 0xed, 0x13, 0x7a, 0x01, 0x8b, 0xd6, 0x05, 0x15, 0xcf, 0x21, 0x7d, 0xb1, + 0x68, 0x5f, 0x93, 0xe5, 0x1e, 0xa5, 0xb3, 0xef, 0x90, 0x7e, 0xbc, 0x50, 0xec, 0x17, 0xe2, 0xc1, + 0xf0, 0x6d, 0x30, 0xeb, 0x51, 0x4c, 0x7d, 0x8f, 0x2f, 0xd3, 0xc2, 0x9d, 0xaf, 0x9f, 0x4e, 0xc3, + 0xa1, 0x9d, 0x25, 0x41, 0x34, 0x1b, 0xfc, 0x46, 0x82, 0xa2, 0xf5, 0xaf, 0x12, 0x80, 0x11, 0xb6, + 0x6b, 0x5b, 0x9a, 0x4e, 0x59, 0xfd, 0xbe, 0x0e, 0x2a, 0x74, 0xe4, 0x10, 0x3e, 0x0d, 0xf3, 0x9d, + 0x1b, 0x61, 0x16, 0xf7, 0x47, 0x0e, 0xf9, 0xf8, 0xa4, 0xb9, 0x96, 0x8f, 0x60, 0x3d, 0x88, 0xc7, + 0xc0, 0xed, 0x28, 0xbf, 0x12, 0x8f, 0xbe, 0x9b, 0x7e, 0xf4, 0xc7, 0x27, 0x4d, 0xc9, 
0x61, 0xa1, + 0x46, 0x4c, 0xe9, 0x04, 0xe1, 0x10, 0x40, 0x03, 0x7b, 0xf4, 0xbe, 0x8b, 0x2d, 0x2f, 0x78, 0x92, + 0x6e, 0x12, 0x31, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x16, 0xd1, 0xb9, 0x2a, 0xb2, 0x80, 0xdb, 0x39, + 0x36, 0x24, 0x79, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 0x28, 0xa2, + 0x09, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x22, 0x98, 0x33, 0x89, 0xe7, 0xe1, 0x01, 0xa9, 0x57, + 0x39, 0x70, 0x59, 0x00, 0xe7, 0x7a, 0x41, 0x33, 0x0a, 0xfb, 0x5b, 0xbf, 0x57, 0xc0, 0x62, 0x34, + 0x73, 0x17, 0xb0, 0x55, 0x3a, 0xe9, 0xad, 0xf2, 0xfc, 0xa9, 0x75, 0x52, 0xb0, 0x43, 0xde, 0x2b, + 0x27, 0x72, 0x66, 0x45, 0x08, 0x7f, 0x02, 0x6a, 0x1e, 0x31, 0x48, 0x9f, 0xda, 0xae, 0xc8, 0xf9, + 0x95, 0x29, 0x73, 0xc6, 0x87, 0xc4, 0xd8, 0x17, 0xa1, 0x9d, 0x4b, 0x2c, 0xe9, 0xf0, 0x17, 0x8a, + 0x28, 0xe1, 0x8f, 0x40, 0x8d, 0x12, 0xd3, 0x31, 0x30, 0x25, 0x62, 0x9b, 0xa4, 0xea, 0x9b, 0x95, + 0x0b, 0x23, 0xdb, 0xb3, 0xb5, 0xfb, 0x02, 0xc6, 0x37, 0x4a, 0x34, 0x0f, 0x61, 0x2b, 0x8a, 0x68, + 0xe0, 0x31, 0x58, 0xf2, 0x1d, 0x8d, 0x21, 0x29, 0x3b, 0xba, 0x07, 0x23, 0x51, 0x3e, 0x37, 0x4f, + 0x9d, 0x90, 0x83, 0x54, 0x48, 0x67, 0x4d, 0x3c, 0x60, 0x29, 0xdd, 0x8e, 0x32, 0xd4, 0x70, 0x13, + 0x2c, 0x9b, 0xba, 0x85, 0x08, 0xd6, 0x46, 0xfb, 0xa4, 0x6f, 0x5b, 0x9a, 0xc7, 0x0b, 0xa8, 0xda, + 0x59, 0x17, 0x04, 0xcb, 0xbd, 0x74, 0x37, 0xca, 0xe2, 0xe1, 0x36, 0x58, 0x0d, 0xcf, 0xd9, 0x1f, + 0xe8, 0x1e, 0xb5, 0xdd, 0xd1, 0xb6, 0x6e, 0xea, 0xb4, 0x3e, 0xcb, 0x79, 0xea, 0xe3, 0x93, 0xe6, + 0x2a, 0x92, 0xf4, 0x23, 0x69, 0x54, 0xeb, 0x37, 0xb3, 0x60, 0x39, 0x73, 0x1a, 0xc0, 0x07, 0x60, + 0xad, 0xef, 0xbb, 0x2e, 0xb1, 0xe8, 0x8e, 0x6f, 0x1e, 0x12, 0x77, 0xbf, 0x7f, 0x44, 0x34, 0xdf, + 0x20, 0x1a, 0x5f, 0xd1, 0x6a, 0xa7, 0x21, 0x72, 0x5d, 0xeb, 0x4a, 0x51, 0xa8, 0x20, 0x1a, 0xfe, + 0x10, 0x40, 0x8b, 0x37, 0xf5, 0x74, 0xcf, 0x8b, 0x38, 0x4b, 0x9c, 0x33, 0xda, 0x80, 0x3b, 0x39, + 0x04, 0x92, 0x44, 0xb1, 0x1c, 0x35, 0xe2, 0xe9, 0x2e, 0xd1, 0xb2, 0x39, 0x96, 0xd3, 0x39, 0x6e, + 0x49, 0x51, 0xa8, 0x20, 0x1a, 0xbe, 0x0a, 0x16, 0x82, 0xa7, 0xf1, 0x39, 0x17, 0x8b, 0xb3, 0x22, + 0xc8, 0x16, 0x76, 0xe2, 0x2e, 0x94, 0xc4, 0xb1, 0xa1, 0xd9, 0x87, 0x1e, 0x71, 0x87, 0x44, 0x7b, + 0x33, 0xf0, 0x00, 0x4c, 0x28, 0xab, 0x5c, 0x28, 0xa3, 0xa1, 0xed, 0xe6, 0x10, 0x48, 0x12, 0xc5, + 0x86, 0x16, 0x54, 0x4d, 0x6e, 0x68, 0xb3, 0xe9, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0x20, 0x9a, 0xd5, + 0x5e, 0x90, 0xf2, 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x5d, 0x7b, 0x3b, 0xe9, + 0x6e, 0x94, 0xc5, 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c, + 0x27, 0x48, 0xae, 0xec, 0x64, 0x01, 0x28, 0x1f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78, + 0x3d, 0x76, 0x6d, 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06, + 0x09, 0x1f, 0x02, 0xd0, 0x0f, 0xe5, 0xc0, 0xab, 0x83, 0x62, 0xa1, 0xcf, 0xeb, 0x50, 0x2c, 0xc0, + 0x51, 0x93, 0x87, 0x12, 0x6c, 0xad, 0xf7, 0x14, 0xb0, 0x5e, 0xb0, 0xc7, 0xe1, 0xf7, 0x52, 0xaa, + 0x77, 0x33, 0xa3, 0x7a, 0xd7, 0x0a, 0xc2, 0x12, 0xd2, 0xd7, 0x07, 0x8b, 0xcc, 0x77, 0xe8, 0xd6, + 0x20, 0x80, 0x88, 0x13, 0xec, 0x25, 0x59, 0xee, 0x28, 0x09, 0x8c, 0x8f, 0xe1, 0x2b, 0xe3, 0x93, + 0xe6, 0x62, 0xaa, 0x0f, 0xa5, 0x39, 0x5b, 0xbf, 0x2c, 0x01, 0xb0, 0x45, 0x1c, 0xc3, 0x1e, 0x99, + 0xc4, 0xba, 0x08, 0xd7, 0xb2, 0x95, 0x72, 0x2d, 0x2d, 0xe9, 0x42, 0x44, 0xf9, 0x14, 0xda, 0x96, + 0xed, 0x8c, 0x6d, 0xf9, 0xc6, 0x04, 0x9e, 0xd3, 0x7d, 0xcb, 0x3f, 0xca, 0x60, 0x25, 0x06, 0xc7, + 0xc6, 0xe5, 0x5e, 0x6a, 0x09, 0x5f, 0xc8, 0x2c, 0xe1, 0xba, 0x24, 0xe4, 0x53, 0x73, 0x2e, 0xef, + 0x82, 0x25, 
0xe6, 0x2b, 0x82, 0x55, 0xe3, 0xae, 0x65, 0xf6, 0xcc, 0xae, 0x25, 0x52, 0x9d, 0xed, + 0x14, 0x13, 0xca, 0x30, 0x17, 0xb8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4b, 0xfa, 0x83, 0x02, 0x96, 0xe2, + 0x65, 0xba, 0x00, 0x9b, 0xd4, 0x4d, 0xdb, 0xa4, 0xc6, 0xe9, 0x75, 0x59, 0xe0, 0x93, 0xfe, 0x5e, + 0x49, 0x66, 0xcd, 0x8d, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x21, 0xab, 0x97, + 0x82, 0x97, 0xa9, 0xa0, 0x0d, 0x45, 0xbd, 0x29, 0x4b, 0x55, 0xfa, 0x74, 0x2d, 0x55, 0xf9, 0x93, + 0xb1, 0x54, 0xf7, 0x41, 0xcd, 0x0b, 0xcd, 0x54, 0x85, 0x53, 0xde, 0x98, 0xb4, 0x9d, 0x85, 0x8f, + 0x8a, 0x58, 0x23, 0x07, 0x15, 0x31, 0xc9, 0xbc, 0x53, 0xf5, 0xb3, 0xf4, 0x4e, 0xac, 0xbc, 0x1d, + 0xec, 0x7b, 0x44, 0xe3, 0x5b, 0xa9, 0x16, 0x97, 0xf7, 0x1e, 0x6f, 0x45, 0xa2, 0x17, 0x1e, 0x80, + 0x75, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x2d, 0x82, 0x35, 0x43, 0xb7, 0x48, 0x38, 0x80, 0x40, + 0xf5, 0xae, 0x8d, 0x4f, 0x9a, 0xeb, 0x7b, 0x72, 0x08, 0x2a, 0x8a, 0x6d, 0xfd, 0xb9, 0x02, 0x2e, + 0x67, 0x4f, 0xc4, 0x02, 0x23, 0xa2, 0x9c, 0xcb, 0x88, 0xbc, 0x9c, 0x28, 0xd1, 0xc0, 0xa5, 0x25, + 0xde, 0xf9, 0x73, 0x65, 0xba, 0x09, 0x96, 0x85, 0xf1, 0x08, 0x3b, 0x85, 0x15, 0x8b, 0x96, 0xe7, + 0x20, 0xdd, 0x8d, 0xb2, 0x78, 0x78, 0x0f, 0x2c, 0xba, 0xdc, 0x5b, 0x85, 0x04, 0x81, 0x3f, 0xf9, + 0x8a, 0x20, 0x58, 0x44, 0xc9, 0x4e, 0x94, 0xc6, 0x32, 0x6f, 0x12, 0x5b, 0x8e, 0x90, 0xa0, 0x92, + 0xf6, 0x26, 0x9b, 0x59, 0x00, 0xca, 0xc7, 0xc0, 0x1e, 0x58, 0xf1, 0xad, 0x3c, 0x55, 0x50, 0x6b, + 0xd7, 0x04, 0xd5, 0xca, 0x41, 0x1e, 0x82, 0x64, 0x71, 0xf0, 0xc7, 0x29, 0xbb, 0x32, 0xcb, 0x4f, + 0x91, 0x17, 0x4e, 0xdf, 0x0e, 0x53, 0xfb, 0x15, 0x89, 0x8f, 0xaa, 0x4d, 0xeb, 0xa3, 0x5a, 0x7f, + 0x52, 0x00, 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0xb9, 0x88, 0x84, 0x44, 0x6a, 0x72, 0x87, 0x73, + 0x73, 0xb2, 0xc3, 0x89, 0x4f, 0xd0, 0xe9, 0x2c, 0x8e, 0x98, 0xde, 0x8b, 0xb9, 0x98, 0x99, 0xc2, + 0xe2, 0xc4, 0xf9, 0x3c, 0x9b, 0xc5, 0x49, 0xf0, 0x9c, 0x6e, 0x71, 0xfe, 0x5d, 0x02, 0x2b, 0x31, + 0x78, 0x6a, 0x8b, 0x23, 0x09, 0xf9, 0xf2, 0x72, 0x66, 0x3a, 0xdb, 0x11, 0x4f, 0xdd, 0xff, 0x89, + 0xed, 0x88, 0x13, 0x2a, 0xb0, 0x1d, 0xbf, 0x2b, 0x25, 0xb3, 0x3e, 0xa3, 0xed, 0xf8, 0x04, 0xae, + 0x2a, 0x3e, 0x77, 0xce, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, 0x29, 0x1d, 0x54, 0x26, + 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x8f, 0x21, 0x21, 0x86, 0x81, 0x82, 0x7e, + 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0xbc, 0x2c, 0x56, 0x9e, 0x55, 0x16, 0xab, + 0xe7, 0x90, 0x45, 0xb9, 0xb3, 0x28, 0x9f, 0xcb, 0x59, 0x4c, 0xad, 0x89, 0x92, 0xe3, 0x6a, 0xe2, + 0x3b, 0xfc, 0xaf, 0x15, 0xb0, 0x26, 0x7f, 0x7d, 0x86, 0x06, 0x58, 0x32, 0xf1, 0xe3, 0xe4, 0xe5, + 0xc5, 0x24, 0xc1, 0xf0, 0xa9, 0x6e, 0xa8, 0xc1, 0xd7, 0x1d, 0xf5, 0x2d, 0x8b, 0xee, 0xba, 0xfb, + 0xd4, 0xd5, 0xad, 0x41, 0x20, 0xb0, 0xbd, 0x14, 0x17, 0xca, 0x70, 0xb7, 0x3e, 0x54, 0xc0, 0x7a, + 0x81, 0xca, 0x5d, 0x6c, 0x26, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, 0xbe, 0xef, 0x0e, 0x42, 0x49, + 0x3e, 0xfb, 0x73, 0xf8, 0x2e, 0xec, 0x09, 0x16, 0x14, 0xf1, 0xb5, 0x76, 0xc1, 0xf5, 0xd4, 0x20, + 0xd9, 0xa6, 0x21, 0x8f, 0x7c, 0x83, 0xef, 0x1f, 0xe1, 0x29, 0x6e, 0x82, 0x79, 0x07, 0xbb, 0x54, + 0x8f, 0xcc, 0x68, 0xb5, 0xb3, 0x38, 0x3e, 0x69, 0xce, 0xef, 0x85, 0x8d, 0x28, 0xee, 0x6f, 0xfd, + 0xaa, 0x04, 0x16, 0x12, 0x24, 0x17, 0xa0, 0xef, 0x6f, 0xa4, 0xf4, 0x5d, 0xfa, 0xc5, 0x24, 0x39, + 0xaa, 0x22, 0x81, 0xef, 0x65, 0x04, 0xfe, 0x9b, 0x93, 0x88, 0x4e, 0x57, 0xf8, 0x8f, 0x4a, 0x60, + 0x35, 0x81, 0x8e, 0x25, 0xfe, 0x3b, 0x29, 0x89, 0xdf, 0xc8, 0x48, 0x7c, 0x5d, 0x16, 0xf3, 0xa5, + 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 
0x60, 0x39, 0x31, 0x77, 0x17, 0x20, 0xf2, 0x5b, 0x69, 0x91, + 0x6f, 0x4e, 0xa8, 0x97, 0x02, 0x95, 0x7f, 0x52, 0x4d, 0xe5, 0xfd, 0x85, 0xbf, 0x5d, 0xf8, 0x39, + 0x58, 0x1d, 0xda, 0x86, 0x6f, 0x92, 0xae, 0x81, 0x75, 0x33, 0x04, 0x30, 0x55, 0x64, 0x93, 0xf8, + 0xa2, 0x94, 0x9e, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x07, 0x71, 0x64, 0xac, 0xc5, 0x0f, 0x24, + 0x74, 0x48, 0xfa, 0x10, 0xf8, 0x2a, 0x58, 0x60, 0x6a, 0xa6, 0xf7, 0xc9, 0x0e, 0x36, 0xc3, 0x9a, + 0x8a, 0xbe, 0x0f, 0xec, 0xc7, 0x5d, 0x28, 0x89, 0x83, 0x47, 0x60, 0xc5, 0xb1, 0xb5, 0x1e, 0xb6, + 0xf0, 0x80, 0xb0, 0xf3, 0x7f, 0xcf, 0x36, 0xf4, 0xfe, 0x88, 0xdf, 0x3b, 0xcc, 0x77, 0x5e, 0x0b, + 0xdf, 0x29, 0xf7, 0xf2, 0x10, 0xe6, 0xd9, 0x25, 0xcd, 0x7c, 0x3f, 0xcb, 0x28, 0xa1, 0x99, 0xfb, + 0x9c, 0x35, 0x97, 0xfb, 0x1f, 0x00, 0x59, 0x71, 0x9d, 0xf3, 0x83, 0x56, 0xd1, 0x8d, 0x4a, 0xed, + 0x5c, 0x5f, 0xa3, 0x3e, 0xaa, 0x80, 0x2b, 0xb9, 0x03, 0xf2, 0x33, 0xbc, 0xd3, 0xc8, 0x39, 0xaf, + 0xf2, 0x19, 0x9c, 0xd7, 0x26, 0x58, 0x16, 0x1f, 0xc2, 0x32, 0xc6, 0x2d, 0x32, 0xd0, 0xdd, 0x74, + 0x37, 0xca, 0xe2, 0x65, 0x77, 0x2a, 0xd5, 0x33, 0xde, 0xa9, 0x24, 0xb3, 0x10, 0xff, 0xbf, 0x11, + 0x54, 0x5d, 0x3e, 0x0b, 0xf1, 0x6f, 0x1c, 0x59, 0x3c, 0xfc, 0x6e, 0x58, 0x52, 0x11, 0xc3, 0x1c, + 0x67, 0xc8, 0xd4, 0x48, 0x44, 0x90, 0x41, 0x3f, 0xd3, 0xc7, 0x9e, 0x77, 0x24, 0x1f, 0x7b, 0x36, + 0x26, 0x94, 0xf2, 0xf4, 0x56, 0xf1, 0x6f, 0x0a, 0x78, 0xae, 0x70, 0x0f, 0xc0, 0xcd, 0x94, 0xce, + 0xde, 0xca, 0xe8, 0xec, 0xf3, 0x85, 0x81, 0x09, 0xb1, 0x35, 0xe5, 0x17, 0x22, 0x77, 0x27, 0x5e, + 0x88, 0x48, 0x5c, 0xd4, 0xe4, 0x9b, 0x91, 0xce, 0xc6, 0x93, 0xa7, 0x8d, 0x99, 0xf7, 0x9f, 0x36, + 0x66, 0x3e, 0x78, 0xda, 0x98, 0xf9, 0xc5, 0xb8, 0xa1, 0x3c, 0x19, 0x37, 0x94, 0xf7, 0xc7, 0x0d, + 0xe5, 0x83, 0x71, 0x43, 0xf9, 0xe7, 0xb8, 0xa1, 0xfc, 0xf6, 0xc3, 0xc6, 0xcc, 0xc3, 0xd2, 0xf0, + 0xf6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x59, 0xb3, 0x11, 0xc0, 0x12, 0x26, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -240,36 +1009,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -277,37 +1055,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -315,41 +1102,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -357,41 +1155,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 
0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -399,37 +1208,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -437,51 +1255,62 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = 
encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -489,58 +1318,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, 
uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -548,31 +1384,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -580,91 +1424,115 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 - return i, nil + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -672,37 +1540,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -710,69 +1587,80 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n19 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -780,52 +1668,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -833,31 +1728,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -865,41 +1768,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m 
*ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -907,41 +1821,52 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -949,37 +1874,46 @@ func (m 
*ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -987,43 +1921,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1031,44 +1974,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1076,27 +2026,34 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1104,37 +2061,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := 
m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1142,22 +2108,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1165,41 +2136,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1207,41 +2189,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1249,37 +2242,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n37, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1287,73 +2289,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Selector.Size())) - n38, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n38 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n39, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n39 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1361,57 +2378,66 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1419,55 +2445,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n41 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int 
{ + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1479,6 +2500,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1493,6 +2517,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1505,6 +2532,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1521,6 +2551,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1535,6 +2568,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -1553,6 +2589,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -1576,6 +2615,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1588,6 +2630,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1600,6 +2645,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1618,6 +2666,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1632,6 +2683,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1657,6 +2711,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1678,6 +2735,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1690,6 +2750,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1702,6 +2765,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1718,6 +2784,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1732,6 +2801,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1748,6 +2820,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l 
n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1765,6 +2840,9 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1775,6 +2853,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1789,6 +2870,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1798,6 +2882,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1810,6 +2897,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1826,6 +2916,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1840,6 +2933,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1870,6 +2966,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1894,6 +2993,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1906,14 +3008,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1923,8 +3018,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -1934,9 +3029,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
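The sovGenerated change above replaces the shift loop with (math_bits.Len64(x|1) + 6) / 7: a varint needs one byte per 7 significant bits, and or-ing with 1 makes zero report one bit so it still costs one byte. A small self-check, not part of the patch, showing the two forms agree:

package main

import (
	"fmt"
	"math/bits"
)

// sovNew mirrors the regenerated helper.
func sovNew(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// sovOld is the loop the patch removes; kept here only to compare results.
func sovOld(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1} {
		fmt.Println(x, sovNew(x), sovOld(x)) // the two sizes match for every input
	}
}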
"ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1946,7 +3046,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1960,7 +3060,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1971,9 +3071,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1983,8 +3088,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -1996,6 +3101,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + 
repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -2006,7 +3116,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2017,7 +3127,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -2027,7 +3137,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2043,8 +3153,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2053,9 +3163,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + 
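The String() methods above switch from formatting a whole repeated field with fmt.Sprintf("%v", ...) to building a repeatedStringForX value element by element from each item's String(). A rough sketch of that pattern with a hypothetical condition type, not taken from the generated file:

package main

import (
	"fmt"
	"strings"
)

// condition is a hypothetical element type standing in for DaemonSetCondition.
type condition struct{ Type, Status string }

func (c condition) String() string {
	return fmt.Sprintf("&condition{Type:%s,Status:%s,}", c.Type, c.Status)
}

func main() {
	conds := []condition{{"Ready", "True"}, {"Progressing", "False"}}

	// Mirror of the regenerated pattern: build the repeated-field text one
	// element at a time from each item's String(), stripping the leading '&'.
	repeated := "[]condition{"
	for _, f := range conds {
		repeated += strings.Replace(f.String(), "&", "", 1) + ","
	}
	repeated += "}"
	fmt.Println(repeated)
}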
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2066,8 +3181,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2081,13 +3196,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2100,7 +3220,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2110,7 +3230,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", 
"ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2124,7 +3244,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2135,9 +3255,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2148,8 +3273,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2159,13 +3284,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2175,7 +3305,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + 
strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2185,8 +3315,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2206,7 +3336,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2220,7 +3350,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2231,9 +3361,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2242,11 +3377,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := 
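The replacements in these String() bodies (v1.ObjectMeta, v11.PodTemplateSpec, intstr.IntOrString instead of the underscore-joined package names) reflect that the regenerated file imports its dependencies under short aliases. A compile-only sketch of the assumed aliasing; the import paths are the packages these types live in, but the struct below is purely illustrative:

package sketch

// Aliases inferred from the diff context, not copied from the vendored
// file's import block.
import (
	v11 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// placeholders keeps the example compiling while showing which alias maps to
// which type; the field names here are illustrative only.
type placeholders struct {
	Meta     v1.ObjectMeta
	Template v11.PodTemplateSpec
	MaxSurge *intstr.IntOrString
}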
strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2259,6 +3399,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2268,7 +3413,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2279,7 +3424,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2307,7 +3452,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2335,7 +3480,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2344,6 +3489,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2365,7 +3513,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error 
{ } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2374,6 +3522,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2395,7 +3546,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2409,6 +3560,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2436,7 +3590,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2464,7 +3618,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2473,6 +3627,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2494,7 +3651,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2503,6 +3660,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2520,6 +3680,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2547,7 +3710,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2575,7 +3738,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2584,6 +3747,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2605,7 +3771,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2614,6 +3780,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2635,7 +3804,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2644,6 +3813,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) 
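The Unmarshal hunks rewrite every varint accumulation from (uint64(b) & 0x7F) << shift to uint64(b&0x7F) << shift, which masks the byte before widening; the two forms are equivalent. A minimal, self-contained decoder in the same style, written as a sketch rather than lifted from the generated code:

package main

import "fmt"

// decodeVarint takes 7 payload bits per byte until the continuation bit clears.
func decodeVarint(dAtA []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows 64 bits")
		}
		if n >= len(dAtA) {
			return 0, 0, fmt.Errorf("unexpected end of input")
		}
		b := dAtA[n]
		n++
		v |= uint64(b&0x7F) << shift // mask first, then widen, as in the new code
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02.
	fmt.Println(decodeVarint([]byte{0xAC, 0x02}))
}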
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2660,6 +3832,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2687,7 +3862,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2715,7 +3890,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2725,6 +3900,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2744,7 +3922,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2754,6 +3932,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2773,7 +3954,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2782,6 +3963,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2803,7 +3987,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2813,6 +3997,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2832,7 +4019,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2842,6 +4029,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2856,6 +4046,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2883,7 +4076,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2911,7 +4104,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) 
<< shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2920,6 +4113,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2941,7 +4137,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2950,6 +4146,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2967,6 +4166,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2994,7 +4196,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3022,7 +4224,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3031,11 +4233,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3055,7 +4260,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3064,6 +4269,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3085,7 +4293,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3094,6 +4302,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3115,7 +4326,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3134,7 +4345,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3149,6 +4360,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3176,7 +4390,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + 
wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3204,7 +4418,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3223,7 +4437,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3242,7 +4456,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3261,7 +4475,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3280,7 +4494,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3299,7 +4513,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3318,7 +4532,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3337,7 +4551,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3356,7 +4570,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3376,7 +4590,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3385,6 +4599,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3402,6 +4619,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3429,7 +4649,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3457,7 +4677,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3467,6 +4687,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3486,7 +4709,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3495,6 +4718,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3514,6 +4740,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3541,7 +4770,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3569,7 +4798,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3578,6 +4807,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3599,7 +4831,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3608,6 +4840,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3629,7 +4864,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3638,6 +4873,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3654,6 +4892,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3681,7 +4922,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3709,7 +4950,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3719,6 +4960,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3738,7 +4982,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3748,6 +4992,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
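The repeated "if postIndex < 0" and "if (iNdEx + skippy) < 0" additions guard against integer overflow: a corrupt length can make the addition wrap negative, and a negative postIndex would otherwise slip past the later "postIndex > l" bounds check. A small sketch of the idea with assumed helper and error names:

package main

import (
	"errors"
	"fmt"
	"math"
)

var errInvalidLength = errors.New("proto: invalid length")

// checkBounds models the guard the patch adds around every length-delimited
// field: reject lengths whose addition wraps negative before comparing to l.
func checkBounds(iNdEx, msglen, l int) (postIndex int, err error) {
	if msglen < 0 {
		return 0, errInvalidLength
	}
	postIndex = iNdEx + msglen
	if postIndex < 0 {
		return 0, errInvalidLength // overflow: the addition wrapped around
	}
	if postIndex > l {
		return 0, errors.New("unexpected EOF")
	}
	return postIndex, nil
}

func main() {
	fmt.Println(checkBounds(10, math.MaxInt, 100)) // rejected as invalid length
	fmt.Println(checkBounds(10, 20, 100))          // ok: postIndex = 30
}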
io.ErrUnexpectedEOF } @@ -3767,7 +5014,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3777,6 +5024,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3796,7 +5046,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3806,6 +5056,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3825,7 +5078,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3834,6 +5087,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3855,7 +5111,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3864,6 +5120,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3880,6 +5139,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3907,7 +5169,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3935,7 +5197,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3944,6 +5206,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3965,7 +5230,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3974,6 +5239,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3991,6 +5259,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4018,7 +5289,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4046,7 +5317,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4066,7 +5337,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4075,11 +5346,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4099,7 +5373,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4108,6 +5382,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4129,7 +5406,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4138,6 +5415,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4159,7 +5439,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4178,7 +5458,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4198,7 +5478,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4218,7 +5498,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4233,6 +5513,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4260,7 +5543,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4288,7 +5571,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4307,7 +5590,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } 
@@ -4326,7 +5609,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4345,7 +5628,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4364,7 +5647,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4383,7 +5666,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4392,6 +5675,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4414,7 +5700,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4433,7 +5719,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4448,6 +5734,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4475,7 +5764,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4503,7 +5792,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4513,6 +5802,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4532,7 +5824,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4541,6 +5833,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4560,6 +5855,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4587,7 +5885,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4615,7 +5913,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift 
if b < 0x80 { break } @@ -4624,6 +5922,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4645,7 +5946,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4654,6 +5955,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4675,7 +5979,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4684,6 +5988,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4700,6 +6007,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4727,7 +6037,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4755,7 +6065,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4765,6 +6075,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4784,7 +6097,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4794,6 +6107,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4813,7 +6129,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4822,6 +6138,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4843,7 +6162,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4853,6 +6172,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4872,7 +6194,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4882,6 +6204,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4896,6 +6221,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4923,7 +6251,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4951,7 +6279,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4960,6 +6288,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4981,7 +6312,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4990,6 +6321,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5007,6 +6341,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5034,7 +6371,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5062,7 +6399,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5082,7 +6419,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5091,11 +6428,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5115,7 +6455,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5124,6 +6464,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5145,7 +6488,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5159,6 +6502,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5186,7 +6532,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5214,7 +6560,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5233,7 +6579,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5252,7 +6598,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5271,7 +6617,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5290,7 +6636,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5309,7 +6655,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5318,6 +6664,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5335,6 +6684,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5362,7 +6714,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5390,7 +6742,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5399,11 +6751,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5418,6 +6773,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + 
skippy) > l { return io.ErrUnexpectedEOF } @@ -5445,7 +6803,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5473,7 +6831,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5482,11 +6840,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5506,7 +6867,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5515,11 +6876,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5534,6 +6898,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5561,7 +6928,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5589,7 +6956,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5604,6 +6971,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5631,7 +7001,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5659,7 +7029,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5668,6 +7038,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5689,7 +7062,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5698,6 +7071,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5719,7 +7095,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5728,6 +7104,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5744,6 +7123,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5771,7 +7153,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5799,7 +7181,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5809,6 +7191,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5828,7 +7213,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5838,6 +7223,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5857,7 +7245,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5866,6 +7254,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5887,7 +7278,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5897,6 +7288,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5916,7 +7310,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5926,6 +7320,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5940,6 +7337,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5967,7 +7367,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5995,7 +7395,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6004,6 +7404,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6025,7 +7428,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6034,6 +7437,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6051,6 +7457,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6078,7 +7487,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6106,7 +7515,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6126,7 +7535,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6135,11 +7544,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6159,7 +7571,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6168,6 +7580,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6189,7 +7604,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6198,10 +7613,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, 
v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6220,7 +7638,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6230,6 +7648,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6249,7 +7670,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6259,6 +7680,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6278,7 +7702,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6287,6 +7711,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6308,7 +7735,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6323,6 +7750,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6350,7 +7780,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6378,7 +7808,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6397,7 +7827,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6416,7 +7846,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6435,7 +7865,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6454,7 +7884,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6473,7 +7903,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6483,6 +7913,9 @@ func (m 
*StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6502,7 +7935,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6512,6 +7945,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6531,7 +7967,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6551,7 +7987,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6560,6 +7996,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6577,6 +8016,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6604,7 +8046,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6632,7 +8074,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6642,6 +8084,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6661,7 +8106,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6670,6 +8115,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6689,6 +8137,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6755,10 +8206,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -6787,6 +8241,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -6805,139 +8262,3 @@ var ( ErrInvalidLengthGenerated = 
fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2037 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, - 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, - 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, - 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, - 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, - 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, - 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x75, 0x50, 0x94, 0x9b, 0xa7, 0xea, 0xfd, 0x5e, 0xff, 0xaa, - 0xea, 0x57, 0xf5, 0x5e, 0x57, 0x1b, 0xdc, 0x3b, 0xfe, 0xb6, 0xa7, 0xea, 0x76, 0xfb, 0xd8, 0x3f, - 0x24, 0xae, 0x45, 0x28, 0xf1, 0xda, 0x43, 0x62, 0x69, 0xb6, 0xdb, 0x16, 0x1d, 0xd8, 0xd1, 0xdb, - 0xd8, 0x71, 0xbc, 0xf6, 0xf0, 0x76, 0x7b, 0x40, 0x2c, 0xe2, 0x62, 0x4a, 0x34, 0xd5, 0x71, 0x6d, - 0x6a, 0x43, 0x18, 0x60, 0x54, 0xec, 0xe8, 0x2a, 0xc3, 0xa8, 0xc3, 0xdb, 0x57, 0x6f, 0x0d, 0x74, - 0x7a, 0xe4, 0x1f, 0xaa, 0x7d, 0xdb, 0x6c, 0x0f, 0xec, 0x81, 0xdd, 0xe6, 0xd0, 0x43, 0xff, 0x11, - 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x02, 0x8a, 0xab, 0xad, 0xc4, 0x63, 0xfa, 0xb6, 0x4b, 0x24, 0x8f, - 0xb9, 0x7a, 0x37, 0xc6, 0x98, 0xb8, 0x7f, 0xa4, 0x5b, 0xc4, 0x1d, 0xb5, 0x9d, 0xe3, 0x01, 0x6b, - 0xf0, 0xda, 0x26, 0xa1, 0x58, 0x16, 0xd5, 0x2e, 0x8a, 0x72, 0x7d, 0x8b, 0xea, 0x26, 0xc9, 0x05, - 0xbc, 0x31, 0x29, 0xc0, 0xeb, 0x1f, 0x11, 0x13, 0xe7, 0xe2, 0x5e, 0x2b, 0x8a, 0xf3, 0xa9, 0x6e, - 0xb4, 0x75, 0x8b, 0x7a, 0xd4, 0xcd, 0x06, 0xb5, 0xfe, 0xa3, 0x00, 0xd8, 0xb5, 0x2d, 0xea, 0xda, - 0x86, 0x41, 0x5c, 0x44, 0x86, 0xba, 0xa7, 0xdb, 0x16, 0xfc, 0x29, 0xa8, 0xb1, 0xf1, 0x68, 0x98, - 0xe2, 0xba, 0x72, 0x5d, 0xd9, 0x58, 0xb8, 0xf3, 0x2d, 0x35, 0x9e, 0xe4, 0x88, 0x5e, 0x75, 0x8e, - 0x07, 0xac, 0xc1, 0x53, 0x19, 0x5a, 0x1d, 0xde, 0x56, 0x77, 0x0f, 0xdf, 0x27, 0x7d, 0xda, 0x23, - 0x14, 0x77, 0xe0, 0x93, 0x93, 0xe6, 0xcc, 0xf8, 0xa4, 0x09, 0xe2, 0x36, 0x14, 0xb1, 0xc2, 0x5d, - 0x50, 0xe1, 0xec, 0x25, 0xce, 0x7e, 0xab, 0x90, 0x5d, 0x0c, 0x5a, 0x45, 0xf8, 0x67, 0x6f, 0x3d, - 0xa6, 0xc4, 0x62, 0xe9, 0x75, 0x2e, 0x09, 0xea, 0xca, 0x16, 0xa6, 0x18, 0x71, 0x22, 0xf8, 0x2a, - 0xa8, 0xb9, 0x22, 0xfd, 0x7a, 0xf9, 0xba, 0xb2, 0x51, 0xee, 0x5c, 0x16, 0xa8, 0x5a, 0x38, 0x2c, - 0x14, 0x21, 0x5a, 0x7f, 0x55, 0xc0, 0x5a, 0x7e, 0xdc, 0xdb, 0xba, 0x47, 0xe1, 0x7b, 0xb9, 0xb1, - 0xab, 0xd3, 0x8d, 0x9d, 0x45, 0xf3, 0x91, 0x47, 0x0f, 0x0e, 0x5b, 0x12, 0xe3, 0x7e, 0x17, 0x54, - 0x75, 0x4a, 0x4c, 0xaf, 0x5e, 0xba, 0x5e, 0xde, 0x58, 0xb8, 0x73, 0x43, 0xcd, 0xd7, 0xae, 0x9a, - 0x4f, 0xac, 0xb3, 0x28, 0x28, 0xab, 0xef, 0xb0, 0x60, 0x14, 0x70, 0xb4, 0xfe, 0xab, 0x80, 0xf9, - 0x2d, 0x4c, 0x4c, 0xdb, 0xda, 0x27, 0xf4, 0x02, 0x16, 0xad, 0x0b, 0x2a, 0x9e, 0x43, 0xfa, 0x62, - 0xd1, 0xbe, 0x26, 0xcb, 0x3d, 0x4a, 0x67, 0xdf, 0x21, 0xfd, 0x78, 0xa1, 0xd8, 0x2f, 0xc4, 0x83, - 0xe1, 0xbb, 0x60, 0xd6, 0xa3, 0x98, 0xfa, 0x1e, 0x5f, 0xa6, 0x85, 0x3b, 0x5f, 0x3f, 0x9d, 0x86, - 0x43, 0x3b, 0x4b, 0x82, 0x68, 0x36, 0xf8, 0x8d, 0x04, 0x45, 0xeb, 0x5f, 0x25, 0x00, 0x23, 0x6c, - 0xd7, 0xb6, 0x34, 0x9d, 0xb2, 0xfa, 0x7d, 0x13, 0x54, 0xe8, 0xc8, 
0x21, 0x7c, 0x1a, 0xe6, 0x3b, - 0x37, 0xc2, 0x2c, 0xee, 0x8f, 0x1c, 0xf2, 0xe9, 0x49, 0x73, 0x2d, 0x1f, 0xc1, 0x7a, 0x10, 0x8f, - 0x81, 0xdb, 0x51, 0x7e, 0x25, 0x1e, 0x7d, 0x37, 0xfd, 0xe8, 0x4f, 0x4f, 0x9a, 0x92, 0xc3, 0x42, - 0x8d, 0x98, 0xd2, 0x09, 0xc2, 0x21, 0x80, 0x06, 0xf6, 0xe8, 0x7d, 0x17, 0x5b, 0x5e, 0xf0, 0x24, - 0xdd, 0x24, 0x62, 0xe4, 0xaf, 0x4c, 0xb7, 0x3c, 0x2c, 0xa2, 0x73, 0x55, 0x64, 0x01, 0xb7, 0x73, - 0x6c, 0x48, 0xf2, 0x04, 0x78, 0x03, 0xcc, 0xba, 0x04, 0x7b, 0xb6, 0x55, 0xaf, 0xf0, 0x51, 0x44, - 0x13, 0x88, 0x78, 0x2b, 0x12, 0xbd, 0xf0, 0x65, 0x30, 0x67, 0x12, 0xcf, 0xc3, 0x03, 0x52, 0xaf, - 0x72, 0xe0, 0xb2, 0x00, 0xce, 0xf5, 0x82, 0x66, 0x14, 0xf6, 0xb7, 0x7e, 0xaf, 0x80, 0xc5, 0x68, - 0xe6, 0x2e, 0x60, 0xab, 0x74, 0xd2, 0x5b, 0xe5, 0xc5, 0x53, 0xeb, 0xa4, 0x60, 0x87, 0x7c, 0x50, - 0x4e, 0xe4, 0xcc, 0x8a, 0x10, 0xfe, 0x04, 0xd4, 0x3c, 0x62, 0x90, 0x3e, 0xb5, 0x5d, 0x91, 0xf3, - 0x6b, 0x53, 0xe6, 0x8c, 0x0f, 0x89, 0xb1, 0x2f, 0x42, 0x3b, 0x97, 0x58, 0xd2, 0xe1, 0x2f, 0x14, - 0x51, 0xc2, 0x1f, 0x81, 0x1a, 0x25, 0xa6, 0x63, 0x60, 0x4a, 0xc4, 0x36, 0x49, 0xd5, 0x37, 0x2b, - 0x17, 0x46, 0xb6, 0x67, 0x6b, 0xf7, 0x05, 0x8c, 0x6f, 0x94, 0x68, 0x1e, 0xc2, 0x56, 0x14, 0xd1, - 0xc0, 0x63, 0xb0, 0xe4, 0x3b, 0x1a, 0x43, 0x52, 0x76, 0x74, 0x0f, 0x46, 0xa2, 0x7c, 0x6e, 0x9e, - 0x3a, 0x21, 0x07, 0xa9, 0x90, 0xce, 0x9a, 0x78, 0xc0, 0x52, 0xba, 0x1d, 0x65, 0xa8, 0xe1, 0x26, - 0x58, 0x36, 0x75, 0x0b, 0x11, 0xac, 0x8d, 0xf6, 0x49, 0xdf, 0xb6, 0x34, 0x8f, 0x17, 0x50, 0xb5, - 0xb3, 0x2e, 0x08, 0x96, 0x7b, 0xe9, 0x6e, 0x94, 0xc5, 0xc3, 0x6d, 0xb0, 0x1a, 0x9e, 0xb3, 0x3f, - 0xd0, 0x3d, 0x6a, 0xbb, 0xa3, 0x6d, 0xdd, 0xd4, 0x69, 0x7d, 0x96, 0xf3, 0xd4, 0xc7, 0x27, 0xcd, - 0x55, 0x24, 0xe9, 0x47, 0xd2, 0xa8, 0xd6, 0x6f, 0x66, 0xc1, 0x72, 0xe6, 0x34, 0x80, 0x0f, 0xc0, - 0x5a, 0xdf, 0x77, 0x5d, 0x62, 0xd1, 0x1d, 0xdf, 0x3c, 0x24, 0xee, 0x7e, 0xff, 0x88, 0x68, 0xbe, - 0x41, 0x34, 0xbe, 0xa2, 0xd5, 0x4e, 0x43, 0xe4, 0xba, 0xd6, 0x95, 0xa2, 0x50, 0x41, 0x34, 0xfc, - 0x21, 0x80, 0x16, 0x6f, 0xea, 0xe9, 0x9e, 0x17, 0x71, 0x96, 0x38, 0x67, 0xb4, 0x01, 0x77, 0x72, - 0x08, 0x24, 0x89, 0x62, 0x39, 0x6a, 0xc4, 0xd3, 0x5d, 0xa2, 0x65, 0x73, 0x2c, 0xa7, 0x73, 0xdc, - 0x92, 0xa2, 0x50, 0x41, 0x34, 0x7c, 0x1d, 0x2c, 0x04, 0x4f, 0xe3, 0x73, 0x2e, 0x16, 0x67, 0x45, - 0x90, 0x2d, 0xec, 0xc4, 0x5d, 0x28, 0x89, 0x63, 0x43, 0xb3, 0x0f, 0x3d, 0xe2, 0x0e, 0x89, 0xf6, - 0x76, 0xe0, 0x01, 0x98, 0x50, 0x56, 0xb9, 0x50, 0x46, 0x43, 0xdb, 0xcd, 0x21, 0x90, 0x24, 0x8a, - 0x0d, 0x2d, 0xa8, 0x9a, 0xdc, 0xd0, 0x66, 0xd3, 0x43, 0x3b, 0x90, 0xa2, 0x50, 0x41, 0x34, 0xab, - 0xbd, 0x20, 0xe5, 0xcd, 0x21, 0xd6, 0x0d, 0x7c, 0x68, 0x90, 0xfa, 0x5c, 0xba, 0xf6, 0x76, 0xd2, - 0xdd, 0x28, 0x8b, 0x87, 0x6f, 0x83, 0x2b, 0x41, 0xd3, 0x81, 0x85, 0x23, 0x92, 0x1a, 0x27, 0x79, - 0x41, 0x90, 0x5c, 0xd9, 0xc9, 0x02, 0x50, 0x3e, 0x06, 0xbe, 0x09, 0x96, 0xfa, 0xb6, 0x61, 0xf0, - 0x7a, 0xec, 0xda, 0xbe, 0x45, 0xeb, 0xf3, 0x9c, 0x05, 0xb2, 0x3d, 0xd4, 0x4d, 0xf5, 0xa0, 0x0c, - 0x12, 0x3e, 0x04, 0xa0, 0x1f, 0xca, 0x81, 0x57, 0x07, 0xc5, 0x42, 0x9f, 0xd7, 0xa1, 0x58, 0x80, - 0xa3, 0x26, 0x0f, 0x25, 0xd8, 0x5a, 0x1f, 0x28, 0x60, 0xbd, 0x60, 0x8f, 0xc3, 0xef, 0xa5, 0x54, - 0xef, 0x66, 0x46, 0xf5, 0xae, 0x15, 0x84, 0x25, 0xa4, 0xaf, 0x0f, 0x16, 0x99, 0xef, 0xd0, 0xad, - 0x41, 0x00, 0x11, 0x27, 0xd8, 0x2b, 0xb2, 0xdc, 0x51, 0x12, 0x18, 0x1f, 0xc3, 0x57, 0xc6, 0x27, - 0xcd, 0xc5, 0x54, 0x1f, 0x4a, 0x73, 0xb6, 0x7e, 0x59, 0x02, 0x60, 0x8b, 0x38, 0x86, 0x3d, 0x32, - 0x89, 0x75, 0x11, 0xae, 0x65, 0x2b, 0xe5, 0x5a, 0x5a, 0xd2, 0x85, 0x88, 0xf2, 0x29, 0xb4, 
0x2d, - 0xdb, 0x19, 0xdb, 0xf2, 0x8d, 0x09, 0x3c, 0xa7, 0xfb, 0x96, 0x7f, 0x94, 0xc1, 0x4a, 0x0c, 0x8e, - 0x8d, 0xcb, 0xbd, 0xd4, 0x12, 0xbe, 0x94, 0x59, 0xc2, 0x75, 0x49, 0xc8, 0x73, 0x73, 0x2e, 0x9f, - 0xbd, 0x83, 0x80, 0xef, 0x83, 0x25, 0x66, 0x55, 0x82, 0x42, 0xe0, 0x46, 0x68, 0xf6, 0xcc, 0x46, - 0x28, 0x12, 0xb2, 0xed, 0x14, 0x13, 0xca, 0x30, 0x17, 0x18, 0xaf, 0xb9, 0xe7, 0x6d, 0xbc, 0x5a, - 0x7f, 0x50, 0xc0, 0x52, 0xbc, 0x4c, 0x17, 0x60, 0x93, 0xba, 0x69, 0x9b, 0xd4, 0x38, 0xbd, 0x2e, - 0x0b, 0x7c, 0xd2, 0xdf, 0x2b, 0xc9, 0xac, 0xb9, 0x51, 0xda, 0x60, 0x2f, 0x54, 0x8e, 0xa1, 0xf7, - 0xb1, 0x27, 0x64, 0xf5, 0x52, 0xf0, 0x32, 0x15, 0xb4, 0xa1, 0xa8, 0x37, 0x65, 0xa9, 0x4a, 0xcf, - 0xd7, 0x52, 0x95, 0x3f, 0x1b, 0x4b, 0x75, 0x1f, 0xd4, 0xbc, 0xd0, 0x4c, 0x55, 0x38, 0xe5, 0x8d, - 0x49, 0xdb, 0x59, 0xf8, 0xa8, 0x88, 0x35, 0x72, 0x50, 0x11, 0x93, 0xcc, 0x3b, 0x55, 0x3f, 0x4f, - 0xef, 0xc4, 0xb6, 0xb0, 0x83, 0x7d, 0x8f, 0x68, 0xbc, 0xee, 0x6b, 0xf1, 0x16, 0xde, 0xe3, 0xad, - 0x48, 0xf4, 0xc2, 0x03, 0xb0, 0xee, 0xb8, 0xf6, 0xc0, 0x25, 0x9e, 0xb7, 0x45, 0xb0, 0x66, 0xe8, - 0x16, 0x09, 0x07, 0x10, 0xa8, 0xde, 0xb5, 0xf1, 0x49, 0x73, 0x7d, 0x4f, 0x0e, 0x41, 0x45, 0xb1, - 0xad, 0x3f, 0x57, 0xc0, 0xe5, 0xec, 0x89, 0x58, 0x60, 0x44, 0x94, 0x73, 0x19, 0x91, 0x57, 0x13, - 0x25, 0x1a, 0xb8, 0xb4, 0xc4, 0x3b, 0x7f, 0xae, 0x4c, 0x37, 0xc1, 0xb2, 0x30, 0x1e, 0x61, 0xa7, - 0xb0, 0x62, 0xd1, 0xf2, 0x1c, 0xa4, 0xbb, 0x51, 0x16, 0xcf, 0xec, 0x45, 0xec, 0x1a, 0x42, 0x92, - 0x4a, 0xda, 0x5e, 0x6c, 0x66, 0x01, 0x28, 0x1f, 0x03, 0x7b, 0x60, 0xc5, 0xb7, 0xf2, 0x54, 0x41, - 0xb9, 0x5c, 0x13, 0x54, 0x2b, 0x07, 0x79, 0x08, 0x92, 0xc5, 0xc1, 0x1f, 0xa7, 0x1c, 0xc7, 0x2c, - 0x3f, 0x08, 0x5e, 0x3a, 0xbd, 0xa2, 0xa7, 0xb6, 0x1c, 0xf0, 0x1e, 0x58, 0x74, 0xb9, 0xa1, 0x0c, - 0xb3, 0x0c, 0x4c, 0xd9, 0x57, 0x44, 0xd8, 0x22, 0x4a, 0x76, 0xa2, 0x34, 0x56, 0xe2, 0xa3, 0x6a, - 0xd3, 0xfa, 0xa8, 0xd6, 0x9f, 0x14, 0x00, 0xf3, 0x5b, 0x70, 0xe2, 0xcb, 0x7d, 0x2e, 0x22, 0x21, - 0x91, 0x9a, 0xdc, 0xe1, 0xdc, 0x9c, 0xec, 0x70, 0xe2, 0x13, 0x74, 0x3a, 0x8b, 0x23, 0x66, 0xe0, - 0x62, 0x2e, 0x66, 0xa6, 0xb0, 0x38, 0x71, 0x3e, 0xcf, 0x66, 0x71, 0x12, 0x3c, 0xa7, 0x5b, 0x9c, - 0x7f, 0x97, 0xc0, 0x4a, 0x0c, 0x9e, 0xda, 0xe2, 0x48, 0x42, 0xbe, 0xbc, 0x9c, 0x99, 0x7c, 0x39, - 0xc3, 0x6c, 0x47, 0x3c, 0x75, 0xff, 0x27, 0xb6, 0x23, 0x4e, 0xa8, 0xc0, 0x76, 0xfc, 0xae, 0x94, - 0xcc, 0xfa, 0x0b, 0x6f, 0x3b, 0x9e, 0xfd, 0x72, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, - 0x29, 0x1d, 0x54, 0x26, 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x4f, 0x43, 0x42, - 0x0c, 0x03, 0x05, 0xfd, 0xaa, 0x88, 0x5c, 0xfd, 0xbe, 0x04, 0x83, 0xa4, 0x91, 0x05, 0x9a, 0x5e, - 0x3e, 0x97, 0xa6, 0xe7, 0xd4, 0xa6, 0x72, 0x06, 0xb5, 0x91, 0xea, 0x73, 0xf5, 0x1c, 0xfa, 0x3c, - 0xb5, 0xa0, 0x4a, 0x8e, 0xab, 0x89, 0xef, 0xf0, 0xbf, 0x56, 0xc0, 0x9a, 0xfc, 0xf5, 0x19, 0x1a, - 0x60, 0xc9, 0xc4, 0x8f, 0x93, 0x97, 0x17, 0x93, 0x04, 0xc3, 0xa7, 0xba, 0xa1, 0x06, 0x5f, 0x77, - 0xd4, 0x77, 0x2c, 0xba, 0xeb, 0xee, 0x53, 0x57, 0xb7, 0x06, 0x81, 0xc0, 0xf6, 0x52, 0x5c, 0x28, - 0xc3, 0xdd, 0xfa, 0x58, 0x01, 0xeb, 0x05, 0x2a, 0x77, 0xb1, 0x99, 0xc0, 0x87, 0xa0, 0x66, 0xe2, - 0xc7, 0xfb, 0xbe, 0x3b, 0x08, 0x25, 0xf9, 0xec, 0xcf, 0xe1, 0x1b, 0xb9, 0x27, 0x58, 0x50, 0xc4, - 0xd7, 0xda, 0x05, 0xd7, 0x53, 0x83, 0x64, 0x9b, 0x86, 0x3c, 0xf2, 0x0d, 0xbe, 0x7f, 0x84, 0xa7, - 0xb8, 0x09, 0xe6, 0x1d, 0xec, 0x52, 0x3d, 0x32, 0xa3, 0xd5, 0xce, 0xe2, 0xf8, 0xa4, 0x39, 0xbf, - 0x17, 0x36, 0xa2, 0xb8, 0xbf, 0xf5, 0xab, 0x12, 0x58, 0x48, 0x90, 0x5c, 0x80, 0xbe, 0xbf, 0x95, - 0xd2, 0x77, 0xe9, 
0x17, 0x93, 0xe4, 0xa8, 0x8a, 0x04, 0xbe, 0x97, 0x11, 0xf8, 0x6f, 0x4e, 0x22, - 0x3a, 0x5d, 0xe1, 0x3f, 0x29, 0x81, 0xd5, 0x04, 0x3a, 0x96, 0xf8, 0xef, 0xa4, 0x24, 0x7e, 0x23, - 0x23, 0xf1, 0x75, 0x59, 0xcc, 0x97, 0x1a, 0x3f, 0x59, 0xe3, 0xff, 0xa8, 0x80, 0xe5, 0xc4, 0xdc, - 0x5d, 0x80, 0xc8, 0x6f, 0xa5, 0x45, 0xbe, 0x39, 0xa1, 0x5e, 0x0a, 0x54, 0xfe, 0x49, 0x35, 0x95, - 0xf7, 0x17, 0x5e, 0xe6, 0x7f, 0x0e, 0x56, 0x87, 0xb6, 0xe1, 0x9b, 0xa4, 0x6b, 0x60, 0xdd, 0x0c, - 0x01, 0x4c, 0xc9, 0xd8, 0x24, 0xbe, 0x2c, 0xa5, 0x27, 0xae, 0xa7, 0x7b, 0x94, 0x58, 0xf4, 0x41, - 0x1c, 0x19, 0x6b, 0xf1, 0x03, 0x09, 0x1d, 0x92, 0x3e, 0x04, 0xbe, 0x0e, 0x16, 0x98, 0xa6, 0xea, - 0x7d, 0xb2, 0x83, 0xcd, 0xb0, 0xa6, 0xa2, 0xef, 0x03, 0xfb, 0x71, 0x17, 0x4a, 0xe2, 0xe0, 0x11, - 0x58, 0x71, 0x6c, 0xad, 0x87, 0x2d, 0x3c, 0x20, 0xec, 0xfc, 0xdf, 0xb3, 0x0d, 0xbd, 0x3f, 0xe2, - 0xf7, 0x0e, 0xf3, 0x9d, 0x37, 0xc2, 0x17, 0xd2, 0xbd, 0x3c, 0x84, 0x79, 0x76, 0x49, 0x33, 0xdf, - 0xcf, 0x32, 0x4a, 0x68, 0xe6, 0x3e, 0x67, 0xcd, 0xe5, 0xfe, 0x07, 0x40, 0x56, 0x5c, 0xe7, 0xfc, - 0xa0, 0x55, 0x74, 0xa3, 0x52, 0x3b, 0xd7, 0xd7, 0xa8, 0x4f, 0x2a, 0xe0, 0x4a, 0xee, 0x80, 0xfc, - 0x1c, 0xef, 0x34, 0x72, 0x6e, 0xa9, 0x7c, 0x06, 0xb7, 0xb4, 0x09, 0x96, 0xc5, 0x87, 0xb0, 0x8c, - 0xd9, 0x8a, 0xec, 0x68, 0x37, 0xdd, 0x8d, 0xb2, 0x78, 0xd9, 0x9d, 0x4a, 0xf5, 0x8c, 0x77, 0x2a, - 0xc9, 0x2c, 0xc4, 0xff, 0x6f, 0x04, 0x55, 0x97, 0xcf, 0x42, 0xfc, 0x1b, 0x47, 0x16, 0x0f, 0xbf, - 0x1b, 0x96, 0x54, 0xc4, 0x30, 0xc7, 0x19, 0x32, 0x35, 0x12, 0x11, 0x64, 0xd0, 0xcf, 0xf4, 0xb1, - 0xe7, 0x3d, 0xc9, 0xc7, 0x9e, 0x8d, 0x09, 0xa5, 0x3c, 0xbd, 0x55, 0xfc, 0x9b, 0x02, 0x5e, 0x28, - 0xdc, 0x03, 0x70, 0x33, 0xa5, 0xb3, 0xb7, 0x32, 0x3a, 0xfb, 0x62, 0x61, 0x60, 0x42, 0x6c, 0x4d, - 0xf9, 0x85, 0xc8, 0xdd, 0x89, 0x17, 0x22, 0x12, 0x17, 0x35, 0xf9, 0x66, 0xa4, 0xb3, 0xf1, 0xe4, - 0x69, 0x63, 0xe6, 0xc3, 0xa7, 0x8d, 0x99, 0x8f, 0x9e, 0x36, 0x66, 0x7e, 0x31, 0x6e, 0x28, 0x4f, - 0xc6, 0x0d, 0xe5, 0xc3, 0x71, 0x43, 0xf9, 0x68, 0xdc, 0x50, 0xfe, 0x39, 0x6e, 0x28, 0xbf, 0xfd, - 0xb8, 0x31, 0xf3, 0xb0, 0x34, 0xbc, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x6b, 0x01, - 0x7b, 0x12, 0x26, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index c8a957ac..6c552797 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -41,7 +41,7 @@ option go_package = "v1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -54,7 +54,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -65,12 +65,12 @@ message ControllerRevisionList { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -78,7 +78,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -107,7 +107,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -280,6 +280,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready @@ -365,12 +366,12 @@ message DeploymentStrategy { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -378,7 +379,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -407,7 +408,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 4431ca2c..e003a0c4 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -32,6 +32,8 @@ const ( ) // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with consistent identities. @@ -67,7 +69,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement = "Parallel" + ParallelPodManagement PodManagementPolicyType = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -93,13 +95,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -244,6 +246,8 @@ type StatefulSetList struct { } // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Deployment enables declarative updates for Pods and ReplicaSets. @@ -279,7 +283,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -613,12 +618,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -626,7 +631,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -644,7 +649,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -653,6 +658,8 @@ type DaemonSetList struct { } // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -661,12 +668,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -674,7 +681,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -685,7 +692,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -793,7 +800,7 @@ type ReplicaSetCondition struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -810,7 +817,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 85fb159d..3f0299d0 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 885203fc..7b7ff385 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -653,7 +653,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go index 6047ed50..9072bab6 100644 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index ef9aa8e0..921e055c 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -14,56 +14,29 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - RollbackConfig - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -76,96 +49,594 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{2} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m 
*DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{3} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{4} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{5} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + 
xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{6} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{7} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{8} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{9} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) 
XXX_Size() int { + return m.Size() +} +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_2a07313e8f66e805, []int{10} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_2a07313e8f66e805, []int{11} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) } -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{12} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{13} 
+} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{14} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{15} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{16} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m 
*StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{17} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{18} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{19} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) 
Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_2a07313e8f66e805, []int{20} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) } +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta1.ControllerRevisionList") @@ -173,6 +644,7 @@ func init() { proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") @@ -182,6 +654,7 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") @@ -189,10 +662,135 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2a07313e8f66e805) +} + +var fileDescriptor_2a07313e8f66e805 = []byte{ + // 1855 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35, + 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, + 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab, + 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, + 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, + 0x77, 0xdb, 0x6d, 0xa4, 0xb5, 0x04, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, + 0xc0, 0x0f, 0xce, 
0xde, 0xf3, 0x54, 0xdd, 0xee, 0x9d, 0xf9, 0x03, 0xea, 0x5a, 0x94, 0x51, 0xaf, + 0x37, 0xa6, 0xd6, 0xd0, 0x76, 0x7b, 0x52, 0x40, 0x1c, 0xbd, 0x47, 0x1c, 0xc7, 0xeb, 0x8d, 0x6f, + 0x0f, 0x28, 0x23, 0xb7, 0x7b, 0x23, 0x6a, 0x51, 0x97, 0x30, 0x3a, 0x54, 0x1d, 0xd7, 0x66, 0x36, + 0x5a, 0x0b, 0x14, 0x55, 0xe2, 0xe8, 0x2a, 0x57, 0x54, 0xa5, 0xe2, 0xfa, 0xf6, 0x48, 0x67, 0x8f, + 0xfc, 0x81, 0xaa, 0xd9, 0x66, 0x6f, 0x64, 0x8f, 0xec, 0x9e, 0xd0, 0x1f, 0xf8, 0x0f, 0xc5, 0x97, + 0xf8, 0x10, 0xbf, 0x02, 0x9c, 0xf5, 0x6e, 0xc2, 0xa1, 0x66, 0xbb, 0xb4, 0x37, 0xce, 0xf9, 0x5a, + 0x7f, 0x27, 0xd6, 0x31, 0x89, 0xf6, 0x48, 0xb7, 0xa8, 0x3b, 0xe9, 0x39, 0x67, 0x23, 0xbe, 0xe0, + 0xf5, 0x4c, 0xca, 0x48, 0x91, 0x55, 0xaf, 0xcc, 0xca, 0xf5, 0x2d, 0xa6, 0x9b, 0x34, 0x67, 0xf0, + 0xee, 0x65, 0x06, 0x9e, 0xf6, 0x88, 0x9a, 0x24, 0x67, 0xf7, 0x76, 0x99, 0x9d, 0xcf, 0x74, 0xa3, + 0xa7, 0x5b, 0xcc, 0x63, 0x6e, 0xd6, 0xa8, 0xfb, 0x2f, 0x05, 0xd0, 0x9e, 0x6d, 0x31, 0xd7, 0x36, + 0x0c, 0xea, 0x62, 0x3a, 0xd6, 0x3d, 0xdd, 0xb6, 0xd0, 0xe7, 0xd0, 0xe4, 0xe7, 0x19, 0x12, 0x46, + 0xda, 0xca, 0xa6, 0xb2, 0xb5, 0xb8, 0xf3, 0x96, 0x1a, 0xdf, 0x74, 0x04, 0xaf, 0x3a, 0x67, 0x23, + 0xbe, 0xe0, 0xa9, 0x5c, 0x5b, 0x1d, 0xdf, 0x56, 0x0f, 0x07, 0x5f, 0x50, 0x8d, 0x1d, 0x50, 0x46, + 0xfa, 0xe8, 0xc9, 0xf9, 0xc6, 0xcc, 0xf4, 0x7c, 0x03, 0xe2, 0x35, 0x1c, 0xa1, 0xa2, 0x43, 0xa8, + 0x0b, 0xf4, 0x9a, 0x40, 0xdf, 0x2e, 0x45, 0x97, 0x87, 0x56, 0x31, 0xf9, 0xc5, 0xfd, 0xc7, 0x8c, + 0x5a, 0x7c, 0x7b, 0xfd, 0x17, 0x24, 0x74, 0xfd, 0x1e, 0x61, 0x04, 0x0b, 0x20, 0xf4, 0x26, 0x34, + 0x5d, 0xb9, 0xfd, 0xf6, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbf, 0x21, 0xb5, 0x9a, 0xe1, 0xb1, 0x70, + 0xa4, 0xd1, 0x7d, 0xa2, 0xc0, 0xad, 0xfc, 0xb9, 0xf7, 0x75, 0x8f, 0xa1, 0x9f, 0xe5, 0xce, 0xae, + 0x56, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 0x8f, 0x1c, 0x87, 0x2b, 0x89, 0x73, 0x1f, 0x41, 0x43, 0x67, + 0xd4, 0xf4, 0xda, 0xb5, 0xcd, 0xd9, 0xad, 0xc5, 0x9d, 0x37, 0xd4, 0x92, 0x00, 0x56, 0xf3, 0xbb, + 0xeb, 0x2f, 0x49, 0xdc, 0xc6, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0xaf, 0x6b, 0x00, 0xf7, 0xa8, + 0x63, 0xd8, 0x13, 0x93, 0x5a, 0xec, 0x1a, 0x9e, 0xee, 0x01, 0xd4, 0x3d, 0x87, 0x6a, 0xf2, 0xe9, + 0x5e, 0x2d, 0x3d, 0x41, 0xbc, 0xa9, 0x63, 0x87, 0x6a, 0xf1, 0xa3, 0xf1, 0x2f, 0x2c, 0x20, 0xd0, + 0x27, 0x30, 0xe7, 0x31, 0xc2, 0x7c, 0x4f, 0x3c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, 0x41, + 0xbf, 0x25, 0xe1, 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9d, 0x85, 0x95, 0x58, 0x79, 0xcf, + 0xb6, 0x86, 0x3a, 0xe3, 0x21, 0x7d, 0x17, 0xea, 0x6c, 0xe2, 0x50, 0x71, 0x27, 0x0b, 0xfd, 0x57, + 0xc3, 0xcd, 0x9c, 0x4c, 0x1c, 0xfa, 0xec, 0x7c, 0x63, 0xad, 0xc0, 0x84, 0x8b, 0xb0, 0x30, 0x42, + 0xfb, 0xd1, 0x3e, 0x6b, 0xc2, 0xfc, 0x9d, 0xb4, 0xf3, 0x67, 0xe7, 0x1b, 0x05, 0x05, 0x44, 0x8d, + 0x90, 0xd2, 0x5b, 0x44, 0x5f, 0x40, 0xcb, 0x20, 0x1e, 0x3b, 0x75, 0x86, 0x84, 0xd1, 0x13, 0xdd, + 0xa4, 0xed, 0x39, 0x71, 0xfa, 0xd7, 0xab, 0x3d, 0x14, 0xb7, 0xe8, 0xdf, 0x92, 0x3b, 0x68, 0xed, + 0xa7, 0x90, 0x70, 0x06, 0x19, 0x8d, 0x01, 0xf1, 0x95, 0x13, 0x97, 0x58, 0x5e, 0x70, 0x2a, 0xee, + 0x6f, 0xfe, 0xca, 0xfe, 0xd6, 0xa5, 0x3f, 0xb4, 0x9f, 0x43, 0xc3, 0x05, 0x1e, 0xd0, 0x2b, 0x30, + 0xe7, 0x52, 0xe2, 0xd9, 0x56, 0xbb, 0x2e, 0x6e, 0x2c, 0x7a, 0x2e, 0x2c, 0x56, 0xb1, 0x94, 0xa2, + 0xd7, 0x60, 0xde, 0xa4, 0x9e, 0x47, 0x46, 0xb4, 0xdd, 0x10, 0x8a, 0xcb, 0x52, 0x71, 0xfe, 0x20, + 0x58, 0xc6, 0xa1, 0xbc, 0xfb, 0x7b, 0x05, 0x5a, 0xf1, 0x33, 0x5d, 0x43, 0xae, 0x7e, 0x94, 0xce, + 0xd5, 0xef, 0x56, 0x08, 0xce, 0x92, 0x1c, 0xfd, 0x7b, 0x0d, 0x50, 0xac, 0x84, 0x6d, 0xc3, 0x18, + 0x10, 0xed, 0x0c, 0x6d, 0x42, 0xdd, 0x22, 
0x66, 0x18, 0x93, 0x51, 0x82, 0xfc, 0x88, 0x98, 0x14, + 0x0b, 0x09, 0xfa, 0x52, 0x01, 0xe4, 0x8b, 0xd7, 0x1c, 0xee, 0x5a, 0x96, 0xcd, 0x08, 0xbf, 0xe0, + 0x70, 0x43, 0x7b, 0x15, 0x36, 0x14, 0xfa, 0x52, 0x4f, 0x73, 0x28, 0xf7, 0x2d, 0xe6, 0x4e, 0xe2, + 0x87, 0xcd, 0x2b, 0xe0, 0x02, 0xd7, 0xe8, 0xa7, 0x00, 0xae, 0xc4, 0x3c, 0xb1, 0x65, 0xda, 0x96, + 0xd7, 0x80, 0xd0, 0xfd, 0x9e, 0x6d, 0x3d, 0xd4, 0x47, 0x71, 0x61, 0xc1, 0x11, 0x04, 0x4e, 0xc0, + 0xad, 0xdf, 0x87, 0xb5, 0x92, 0x7d, 0xa2, 0x1b, 0x30, 0x7b, 0x46, 0x27, 0xc1, 0x55, 0x61, 0xfe, + 0x13, 0xad, 0x42, 0x63, 0x4c, 0x0c, 0x9f, 0x06, 0x39, 0x89, 0x83, 0x8f, 0x3b, 0xb5, 0xf7, 0x94, + 0xee, 0xef, 0x1a, 0xc9, 0x48, 0xe1, 0xf5, 0x06, 0x6d, 0xf1, 0xf6, 0xe0, 0x18, 0xba, 0x46, 0x3c, + 0x81, 0xd1, 0xe8, 0xbf, 0x10, 0xb4, 0x86, 0x60, 0x0d, 0x47, 0x52, 0xf4, 0x73, 0x68, 0x7a, 0xd4, + 0xa0, 0x1a, 0xb3, 0x5d, 0x59, 0xe2, 0xde, 0xae, 0x18, 0x53, 0x64, 0x40, 0x8d, 0x63, 0x69, 0x1a, + 0xc0, 0x87, 0x5f, 0x38, 0x82, 0x44, 0x9f, 0x40, 0x93, 0x51, 0xd3, 0x31, 0x08, 0xa3, 0xf2, 0xf6, + 0x52, 0x71, 0xc5, 0x6b, 0x07, 0x07, 0x3b, 0xb2, 0x87, 0x27, 0x52, 0x4d, 0x54, 0xcf, 0x28, 0x4e, + 0xc3, 0x55, 0x1c, 0xc1, 0xa0, 0x9f, 0x40, 0xd3, 0x63, 0xbc, 0xab, 0x8f, 0x26, 0x22, 0xdb, 0x2e, + 0x6a, 0x2b, 0xc9, 0x3a, 0x1a, 0x98, 0xc4, 0xd0, 0xe1, 0x0a, 0x8e, 0xe0, 0xd0, 0x2e, 0x2c, 0x9b, + 0xba, 0x85, 0x29, 0x19, 0x4e, 0x8e, 0xa9, 0x66, 0x5b, 0x43, 0x4f, 0xa4, 0x69, 0xa3, 0xbf, 0x26, + 0x8d, 0x96, 0x0f, 0xd2, 0x62, 0x9c, 0xd5, 0x47, 0xfb, 0xb0, 0x1a, 0xb6, 0xdd, 0x8f, 0x74, 0x8f, + 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xa3, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, 0xe2, + 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x75, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x86, 0x35, 0xe3, 0xba, + 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x1f, 0xa7, 0xc2, 0xb4, 0x79, 0xb5, 0x30, 0x6d, 0x95, 0x87, + 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, 0xa2, + 0xe1, 0xcd, 0x2c, 0x88, 0x13, 0xbd, 0x34, 0x3d, 0xdf, 0x58, 0x3b, 0x2a, 0x56, 0xc1, 0x65, 0xb6, + 0xdd, 0x3f, 0xd5, 0xe1, 0x46, 0xb6, 0xc7, 0xa1, 0x8f, 0x01, 0xd9, 0x03, 0x8f, 0xba, 0x63, 0x3a, + 0xfc, 0x30, 0x18, 0xdc, 0xf8, 0x74, 0xa3, 0x88, 0xe9, 0x26, 0xca, 0xdb, 0xc3, 0x9c, 0x06, 0x2e, + 0xb0, 0x0a, 0xe6, 0x23, 0x99, 0x00, 0x35, 0xb1, 0xd1, 0xc4, 0x7c, 0x94, 0x4b, 0x82, 0x5d, 0x58, + 0x96, 0xb9, 0x1f, 0x0a, 0x45, 0xb0, 0x26, 0xde, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74, 0x17, + 0x96, 0x5c, 0x1e, 0x07, 0x11, 0xc0, 0xbc, 0x00, 0xf8, 0x96, 0x04, 0x58, 0xc2, 0x49, 0x21, 0x4e, + 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x75, 0x01, 0xf0, + 0xa2, 0x04, 0xb8, 0xb9, 0x9b, 0x55, 0xc0, 0x79, 0x1b, 0x74, 0x00, 0x2b, 0xbe, 0x95, 0x87, 0x0a, + 0x82, 0xf8, 0x25, 0x09, 0xb5, 0x72, 0x9a, 0x57, 0xc1, 0x45, 0x76, 0xe8, 0x73, 0x00, 0x2d, 0xec, + 0xea, 0x5e, 0x7b, 0x4e, 0x94, 0xe1, 0x37, 0x2b, 0x24, 0x5b, 0x34, 0x0a, 0xc4, 0x25, 0x30, 0x5a, + 0xf2, 0x70, 0x02, 0x13, 0xdd, 0x81, 0x96, 0x66, 0x1b, 0x86, 0x88, 0xfc, 0x3d, 0xdb, 0xb7, 0x98, + 0x08, 0xde, 0x46, 0x1f, 0xf1, 0x66, 0xbf, 0x97, 0x92, 0xe0, 0x8c, 0x66, 0xf7, 0x8f, 0x4a, 0xb2, + 0xcd, 0x84, 0xe9, 0x8c, 0xee, 0xa4, 0x46, 0x9f, 0x57, 0x32, 0xa3, 0xcf, 0xad, 0xbc, 0x45, 0x62, + 0xf2, 0xd1, 0x61, 0x89, 0x07, 0xbf, 0x6e, 0x8d, 0x82, 0x07, 0x97, 0x25, 0xf1, 0xad, 0x0b, 0x53, + 0x29, 0xd2, 0x4e, 0x34, 0xc6, 0x9b, 0xe2, 0xcd, 0x93, 0x42, 0x9c, 0x46, 0xee, 0x7e, 0x00, 0xad, + 0x74, 0x1e, 0xa6, 0x66, 0x7a, 0xe5, 0xd2, 0x99, 0xfe, 0x1b, 0x05, 0xd6, 0x4a, 0xbc, 0x23, 0x03, + 0x5a, 0x26, 0x79, 0x9c, 0x78, 0xe6, 0x4b, 0x67, 0x63, 0xce, 0x9a, 
0xd4, 0x80, 0x35, 0xa9, 0x0f, + 0x2c, 0x76, 0xe8, 0x1e, 0x33, 0x57, 0xb7, 0x46, 0xc1, 0x3b, 0x1c, 0xa4, 0xb0, 0x70, 0x06, 0x1b, + 0x7d, 0x06, 0x4d, 0x93, 0x3c, 0x3e, 0xf6, 0xdd, 0x51, 0xd1, 0x7d, 0x55, 0xf3, 0x23, 0xfa, 0xc7, + 0x81, 0x44, 0xc1, 0x11, 0x5e, 0xf7, 0x10, 0x36, 0x53, 0x87, 0xe4, 0xa5, 0x82, 0x3e, 0xf4, 0x8d, + 0x63, 0x1a, 0x3f, 0xf8, 0x1b, 0xb0, 0xe0, 0x10, 0x97, 0xe9, 0x51, 0xb9, 0x68, 0xf4, 0x97, 0xa6, + 0xe7, 0x1b, 0x0b, 0x47, 0xe1, 0x22, 0x8e, 0xe5, 0xdd, 0x7f, 0x2b, 0xd0, 0x38, 0xd6, 0x88, 0x41, + 0xaf, 0x81, 0x3a, 0xdc, 0x4b, 0x51, 0x87, 0x6e, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, + 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, + 0xe5, 0xb2, 0x2a, 0xd9, 0xfd, 0x6d, 0x0d, 0x16, 0x13, 0x2e, 0xae, 0x66, 0xcd, 0xaf, 0x3b, 0x31, + 0x68, 0xf0, 0x4a, 0xb2, 0x53, 0xe5, 0x20, 0x6a, 0x38, 0x54, 0x04, 0xf3, 0x5b, 0xdc, 0xbd, 0xf3, + 0xb3, 0xc6, 0x07, 0xd0, 0x62, 0xc4, 0x1d, 0x51, 0x16, 0xca, 0xc4, 0x85, 0x2d, 0xc4, 0xe4, 0xe1, + 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, + 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, + 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x47, 0x0d, 0x56, 0x13, + 0xda, 0x31, 0x37, 0xfd, 0x7e, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, + 0xd3, 0x62, 0xc2, 0x38, 0xfb, 0xbf, 0x48, 0x18, 0xff, 0xa0, 0xc0, 0x72, 0xe2, 0xee, 0xae, 0x81, + 0x31, 0x3e, 0x48, 0x33, 0xc6, 0x97, 0xab, 0x04, 0x4d, 0x09, 0x65, 0xfc, 0x73, 0x23, 0xb5, 0xf9, + 0xff, 0x7b, 0x12, 0xf3, 0x4b, 0x58, 0x1d, 0xdb, 0x86, 0x6f, 0xd2, 0x3d, 0x83, 0xe8, 0x66, 0xa8, + 0xc0, 0x87, 0xbe, 0xd9, 0xec, 0x1f, 0x43, 0x11, 0x3c, 0x75, 0x3d, 0xdd, 0x63, 0xd4, 0x62, 0x9f, + 0xc6, 0x96, 0xfd, 0x6f, 0x4b, 0x27, 0xab, 0x9f, 0x16, 0xc0, 0xe1, 0x42, 0x27, 0xe8, 0x7b, 0xb0, + 0xc8, 0x07, 0x66, 0x5d, 0xa3, 0x9c, 0x7b, 0xcb, 0xc0, 0x5a, 0x91, 0x40, 0x8b, 0xc7, 0xb1, 0x08, + 0x27, 0xf5, 0xd0, 0x23, 0x58, 0x71, 0xec, 0xe1, 0x01, 0xb1, 0xc8, 0x88, 0xf2, 0x31, 0xe3, 0xc8, + 0x36, 0x74, 0x6d, 0x22, 0x98, 0xcd, 0x42, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c, 0xe3, + 0x14, 0x21, 0xbf, 0x2c, 0x92, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0x65, 0xbb, 0x97, 0x44, 0x2f, + 0xf8, 0x0b, 0x67, 0xa7, 0x4a, 0x84, 0x9d, 0xa6, 0x2c, 0xe3, 0xea, 0x9f, 0x5e, 0xc7, 0x19, 0x0f, + 0xa5, 0xc4, 0xad, 0xf9, 0xdf, 0x10, 0xb7, 0xee, 0x3f, 0xeb, 0x70, 0x33, 0x57, 0x2a, 0xd1, 0x0f, + 0x2f, 0x60, 0x38, 0xb7, 0x9e, 0x1b, 0xbb, 0xc9, 0x51, 0x93, 0xd9, 0x2b, 0x50, 0x93, 0x5d, 0x58, + 0xd6, 0x7c, 0xd7, 0xa5, 0x16, 0xcb, 0x10, 0x93, 0x88, 0x1a, 0xed, 0xa5, 0xc5, 0x38, 0xab, 0x5f, + 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, 0xbb, 0xfc, 0x2e, 0xe4, + 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, 0x83, 0xd3, 0x94, 0x14, + 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x47, 0x81, 0xc8, 0xf1, 0xed, + 0x2a, 0xb1, 0x5c, 0x99, 0x48, 0x75, 0xff, 0xa2, 0xc0, 0x8b, 0xa5, 0x49, 0x80, 0x76, 0x53, 0x2d, + 0x77, 0x3b, 0xd3, 0x72, 0xbf, 0x53, 0x6a, 0x98, 0xe8, 0xbb, 0x6e, 0x31, 0x35, 0x7a, 0xbf, 0x1a, + 0x35, 0x2a, 0x98, 0xdb, 0x2f, 0xe7, 0x48, 0xfd, 0xed, 0x27, 0x4f, 0x3b, 0x33, 0x5f, 0x3d, 0xed, + 0xcc, 0x7c, 0xfd, 0xb4, 0x33, 0xf3, 0xab, 0x69, 0x47, 0x79, 0x32, 0xed, 0x28, 0x5f, 0x4d, 0x3b, + 0xca, 0xd7, 0xd3, 0x8e, 0xf2, 0xb7, 0x69, 0x47, 0xf9, 0xcd, 0x37, 0x9d, 0x99, 0xcf, 0xe6, 0xa5, + 0xc7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x99, 0x8d, 0x1e, 0xaf, 0x61, 0x1b, 0x00, 0x00, +} 
+ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -200,36 +798,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -237,37 +844,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -275,41 +891,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -317,49 +944,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n7, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -367,37 +1007,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -405,51 +1054,61 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n10, err := m.RollbackTo.MarshalTo(dAtA[i:]) - 
if err != nil { - return 0, err - } - i += n10 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -457,79 +1116,92 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n11, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n12, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n13, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x38 - i++ + i-- if m.Paused { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n14, err := m.RollbackTo.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n14 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -537,52 +1209,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -590,31 +1269,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n15, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RollbackConfig) Marshal() (dAtA []byte, 
err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -622,20 +1309,25 @@ func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { } func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -643,37 +1335,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n16, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n17, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -681,22 +1382,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -704,41 +1410,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - 
n18, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n19, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n20, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -746,20 +1463,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -767,46 +1489,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -814,41 +1544,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n21, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n22, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n23, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n23 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -856,41 +1597,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -898,37 +1650,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n25, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -936,73 +1697,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n26, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n26 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n27, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n27 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := 
len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1010,59 +1786,68 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1070,55 +1855,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1130,6 +1910,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1144,6 +1927,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1156,6 +1942,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1174,6 +1963,9 @@ 
func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1188,6 +1980,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1206,6 +2001,9 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1235,6 +2033,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1256,6 +2057,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1268,6 +2072,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -1275,6 +2082,9 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1289,6 +2099,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1298,6 +2111,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1310,6 +2126,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1317,6 +2136,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1334,6 +2156,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1346,6 +2171,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1362,6 +2190,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1376,6 +2207,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1406,6 +2240,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1432,6 +2269,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1444,14 +2284,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1461,8 +2294,8 @@ func (this *ControllerRevision) String() string { 
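[editor's note] The regenerated marshalers above replace the old forward MarshalTo walk with MarshalToSizedBuffer: the buffer is allocated at m.Size() up front, fields are written back to front in reverse field order, each varint is placed by first stepping back sovGenerated(v) bytes, and sovGenerated now derives the width from math_bits.Len64 instead of a shift loop. A minimal standalone sketch of that pattern, with invented names (toy, sov, putVarint) rather than anything from this patch:

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the regenerated sovGenerated: the byte width of a varint,
// computed from the bit length. x|1 keeps Len64 from returning 0 for x == 0.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// putVarint mirrors the new encodeVarintGenerated: it receives the index just
// after where the varint must end, steps back by sov(v), writes the varint
// forward from that base, and returns the base as the new "i".
func putVarint(buf []byte, i int, v uint64) int {
	i -= sov(v)
	base := i
	for v >= 1<<7 {
		buf[i] = byte(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	buf[i] = byte(v)
	return base
}

// toy message: field 1 = varint, field 2 = bytes.
type toy struct {
	Revision int64
	Name     string
}

func (m *toy) size() int {
	return 1 + sov(uint64(m.Revision)) + 1 + sov(uint64(len(m.Name))) + len(m.Name)
}

// marshalToSizedBuffer writes fields in reverse field order, back to front,
// the same shape as the regenerated MarshalToSizedBuffer methods in this diff.
func (m *toy) marshalToSizedBuffer(buf []byte) (int, error) {
	i := len(buf)
	// field 2 (tag 0x12): value bytes, then length varint, then the tag byte
	i -= len(m.Name)
	copy(buf[i:], m.Name)
	i = putVarint(buf, i, uint64(len(m.Name)))
	i--
	buf[i] = 0x12
	// field 1 (tag 0x8): varint value, then the tag byte
	i = putVarint(buf, i, uint64(m.Revision))
	i--
	buf[i] = 0x8
	return len(buf) - i, nil
}

func main() {
	m := &toy{Revision: 300, Name: "web"}
	buf := make([]byte, m.size())
	n, _ := m.marshalToSizedBuffer(buf)
	fmt.Printf("%d bytes: % x\n", n, buf[len(buf)-n:]) // 8 bytes: 08 ac 02 12 03 77 65 62
}

Writing back to front is also what lets the generated code drop the extra msg.Size() call before each embedded field: the nested message is marshaled first into dAtA[:i], and the number of bytes it consumed becomes its length prefix directly.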
return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -1472,9 +2305,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1484,7 +2322,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1500,8 +2338,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1510,9 +2348,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := 
strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1545,13 +2388,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -1561,13 +2404,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -1580,7 +2428,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -1600,8 +2448,8 @@ func (this 
*RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1621,7 +2469,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1665,7 +2513,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1679,7 +2527,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1690,9 +2538,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1701,11 +2554,16 @@ func (this 
*StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -1718,6 +2576,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -1727,7 +2590,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1738,7 +2601,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -1766,7 +2629,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1794,7 +2657,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1803,6 
+2666,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1824,7 +2690,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1833,6 +2699,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1854,7 +2723,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1868,6 +2737,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1895,7 +2767,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1923,7 +2795,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1932,6 +2804,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1953,7 +2828,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1962,6 +2837,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1979,6 +2857,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2006,7 +2887,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2034,7 +2915,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2043,6 +2924,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2064,7 +2948,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2073,6 +2957,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2094,7 +2981,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2103,6 +2990,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2119,6 +3009,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2146,7 +3039,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2174,7 +3067,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2184,6 +3077,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2203,7 +3099,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2212,7 +3108,10 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2232,7 +3131,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2242,6 +3141,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2261,7 +3163,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2271,6 +3173,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2290,7 +3195,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2299,6 +3204,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2320,7 +3228,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -2329,6 +3237,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2345,6 +3256,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2372,7 +3286,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2400,7 +3314,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2409,6 +3323,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2430,7 +3347,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2439,6 +3356,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2456,6 +3376,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2483,7 +3406,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2511,7 +3434,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2521,6 +3444,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2540,7 +3466,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2549,54 +3475,20 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) 
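[editor's note] Two mechanical changes repeat through every Unmarshal hunk here: the varint accumulation drops the redundant parentheses (uint64(b&0x7F) << shift), and each computed postIndex and iNdEx+skippy now gets an explicit negative check. The second one matters: a hostile length varint near MaxInt makes the addition wrap negative, and the existing postIndex > l test alone would not reject it. A rough illustration of that guard, with invented helper names:

package main

import (
	"errors"
	"fmt"
	"math"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// readVarint is the same shift/0x7F/0x80 loop the generated Unmarshal
// methods inline for every field.
func readVarint(data []byte, idx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if idx >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[idx]
		idx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, idx, nil
		}
	}
}

// boundsCheck mirrors the post-index logic: without the "< 0" test, a length
// near MaxInt makes idx+length wrap negative, and "postIndex > l" alone
// would accept it.
func boundsCheck(idx, length, l int) error {
	postIndex := idx + length
	if postIndex < 0 { // the guard added throughout this regeneration
		return errInvalidLength
	}
	if postIndex > l {
		return errors.New("unexpected EOF")
	}
	return nil
}

func main() {
	// a length-delimited field claiming math.MaxInt bytes of payload
	fmt.Println(boundsCheck(10, math.MaxInt, 100)) // caught by the new < 0 guard

	v, next, _ := readVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next) // 300 2
}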
- if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2606,41 +3498,86 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -2656,7 +3593,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2665,6 +3602,9 
@@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2681,6 +3621,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2708,7 +3651,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2736,7 +3679,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2756,7 +3699,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2765,11 +3708,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2789,7 +3735,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2798,6 +3744,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2819,7 +3768,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2828,6 +3777,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2849,7 +3801,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2868,7 +3820,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2888,7 +3840,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2908,7 +3860,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2917,6 +3869,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2941,7 +3896,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } 
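[editor's note] The map handling rewritten above (DeploymentRollback.UpdatedAnnotations, and ScaleStatus.Selector further down) now treats each map<string,string> entry as a small embedded message and loops over its fields, so an entry with a missing key, a missing value, or extra fields no longer derails decoding the way the old key-then-value sequence could. A simplified sketch of that entry loop, with invented names and the simplifying assumption that every field in the entry is length delimited (the generated code instead calls skipGenerated on anything unknown):

package main

import (
	"errors"
	"fmt"
)

// readVarint: the usual 7-bit little-endian varint read.
func readVarint(data []byte, idx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if idx >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[idx]
		idx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, idx, nil
		}
	}
}

// decodeStringEntry walks one map<string,string> entry the way the
// regenerated Unmarshal code does: key is field 1, value is field 2,
// other field numbers are ignored, and both default to "" if absent.
func decodeStringEntry(entry []byte) (key, value string, err error) {
	idx := 0
	for idx < len(entry) {
		wire, next, err := readVarint(entry, idx)
		if err != nil {
			return "", "", err
		}
		idx = next
		fieldNum := int32(wire >> 3)
		ln, next, err := readVarint(entry, idx) // assumes length-delimited fields only
		if err != nil {
			return "", "", err
		}
		idx = next
		end := idx + int(ln)
		if end < 0 || end > len(entry) {
			return "", "", errors.New("invalid length")
		}
		switch fieldNum {
		case 1:
			key = string(entry[idx:end])
		case 2:
			value = string(entry[idx:end])
		}
		idx = end
	}
	return key, value, nil
}

func main() {
	// one entry of {"app": "web"}: field 1 = "app", field 2 = "web"
	entry := []byte{0x0a, 0x03, 'a', 'p', 'p', 0x12, 0x03, 'w', 'e', 'b'}
	k, v, _ := decodeStringEntry(entry)
	fmt.Println(k, v) // app web
}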
b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2956,6 +3911,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2983,7 +3941,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3011,7 +3969,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3030,7 +3988,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3049,7 +4007,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3068,7 +4026,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3087,7 +4045,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3106,7 +4064,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3115,6 +4073,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3137,7 +4098,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3156,7 +4117,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3171,6 +4132,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3198,7 +4162,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3226,7 +4190,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3236,6 +4200,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3255,7 +4222,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3264,6 +4231,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3283,6 +4253,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3310,7 +4283,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3338,7 +4311,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3352,6 +4325,9 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3379,7 +4355,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3407,7 +4383,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3416,11 +4392,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3440,7 +4419,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3449,11 +4428,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3468,6 +4450,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3495,7 +4480,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3523,7 +4508,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 
0x80 { break } @@ -3538,6 +4523,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3565,7 +4553,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3593,7 +4581,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3602,6 +4590,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3623,7 +4614,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3632,6 +4623,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3653,7 +4647,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3662,6 +4656,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3678,6 +4675,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3705,7 +4705,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3733,7 +4733,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3747,6 +4747,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3774,7 +4777,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3802,7 +4805,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3821,7 +4824,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3830,54 +4833,20 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3887,41 +4856,86 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = 
postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -3937,7 +4951,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3947,6 +4961,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3961,6 +4978,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3988,7 +5008,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4016,7 +5036,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4025,6 +5045,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4046,7 +5069,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4055,6 +5078,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4076,7 +5102,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4085,6 +5111,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4101,6 +5130,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4128,7 +5160,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4156,7 +5188,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4166,6 +5198,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4185,7 +5220,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -4195,6 +5230,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4214,7 +5252,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4223,6 +5261,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4244,7 +5285,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4254,6 +5295,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4273,7 +5317,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4283,6 +5327,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4297,6 +5344,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4324,7 +5374,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4352,7 +5402,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4361,6 +5411,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4382,7 +5435,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4391,6 +5444,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4408,6 +5464,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4435,7 +5494,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4463,7 +5522,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4483,7 +5542,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4492,11 +5551,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4516,7 +5578,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4525,6 +5587,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4546,7 +5611,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4555,10 +5620,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -4577,7 +5645,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4587,6 +5655,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4606,7 +5677,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4616,6 +5687,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4635,7 +5709,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4644,6 +5718,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4665,7 +5742,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 
0x80 { break } @@ -4680,6 +5757,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4707,7 +5787,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4735,7 +5815,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4755,7 +5835,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4774,7 +5854,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4793,7 +5873,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4812,7 +5892,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4831,7 +5911,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4841,6 +5921,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4860,7 +5943,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4870,6 +5953,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4889,7 +5975,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4909,7 +5995,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4918,6 +6004,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4935,6 +6024,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4962,7 +6054,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4990,7 +6082,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5000,6 +6092,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5019,7 +6114,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5028,6 +6123,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5047,6 +6145,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5113,10 +6214,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -5145,6 +6249,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -5163,128 +6270,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1859 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0xd5, - 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, - 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0x6b, - 0x86, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, - 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, - 0x77, 0xdb, 0x6d, 0xa4, 0xf5, 0x21, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, - 0xc0, 0x8f, 0xce, 0xde, 0xf3, 0x35, 0xc3, 0xe9, 0x9e, 0x8d, 0xfa, 0xd4, 0xb3, 0x29, 0xa3, 0x7e, - 0x77, 0x4c, 0xed, 0x81, 0xe3, 0x75, 0xa5, 0x80, 0xb8, 0x46, 0x97, 0xb8, 0xae, 0xdf, 0x1d, 0xdf, - 0xee, 0x53, 0x46, 0x6e, 0x77, 0x87, 0xd4, 0xa6, 0x1e, 0x61, 0x74, 0xa0, 0xb9, 0x9e, 0xc3, 0x1c, - 0xb4, 0x16, 0x28, 0x6a, 0xc4, 0x35, 0x34, 0xae, 0xa8, 0x49, 0xc5, 0xf5, 0xed, 0xa1, 0xc1, 0x1e, - 0x8d, 0xfa, 0x9a, 0xee, 0x58, 0xdd, 0xa1, 0x33, 0x74, 0xba, 0x42, 0xbf, 0x3f, 0x7a, 0x28, 0xbe, - 0xc4, 0x87, 0xf8, 0x15, 0xe0, 0xac, 0xab, 0x09, 0x87, 0xba, 0xe3, 0xd1, 0xee, 0x38, 0xe7, 0x6b, - 0xfd, 0x9d, 0x58, 0xc7, 0x22, 0xfa, 0x23, 0xc3, 0xa6, 0xde, 0xa4, 0xeb, 0x9e, 0x0d, 0xf9, 0x82, - 0xdf, 0xb5, 0x28, 0x23, 0x45, 0x56, 
0xdd, 0x32, 0x2b, 0x6f, 0x64, 0x33, 0xc3, 0xa2, 0x39, 0x83, - 0x77, 0x2f, 0x33, 0xf0, 0xf5, 0x47, 0xd4, 0x22, 0x39, 0xbb, 0xb7, 0xcb, 0xec, 0x46, 0xcc, 0x30, - 0xbb, 0x86, 0xcd, 0x7c, 0xe6, 0x65, 0x8d, 0xd4, 0xff, 0x28, 0x80, 0xf6, 0x1c, 0x9b, 0x79, 0x8e, - 0x69, 0x52, 0x0f, 0xd3, 0xb1, 0xe1, 0x1b, 0x8e, 0x8d, 0x3e, 0x87, 0x26, 0x3f, 0xcf, 0x80, 0x30, - 0xd2, 0x56, 0x36, 0x95, 0xad, 0xc5, 0x9d, 0xb7, 0xb4, 0xf8, 0xa6, 0x23, 0x78, 0xcd, 0x3d, 0x1b, - 0xf2, 0x05, 0x5f, 0xe3, 0xda, 0xda, 0xf8, 0xb6, 0x76, 0xd8, 0xff, 0x82, 0xea, 0xec, 0x80, 0x32, - 0xd2, 0x43, 0x4f, 0xce, 0x37, 0x66, 0xa6, 0xe7, 0x1b, 0x10, 0xaf, 0xe1, 0x08, 0x15, 0x1d, 0x42, - 0x5d, 0xa0, 0xd7, 0x04, 0xfa, 0x76, 0x29, 0xba, 0x3c, 0xb4, 0x86, 0xc9, 0xaf, 0xee, 0x3f, 0x66, - 0xd4, 0xe6, 0xdb, 0xeb, 0xbd, 0x20, 0xa1, 0xeb, 0xf7, 0x08, 0x23, 0x58, 0x00, 0xa1, 0x37, 0xa1, - 0xe9, 0xc9, 0xed, 0xb7, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xde, 0x0d, 0xa9, 0xd5, 0x0c, 0x8f, 0x85, - 0x23, 0x0d, 0xf5, 0x89, 0x02, 0xb7, 0xf2, 0xe7, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0x3b, 0xbb, - 0x56, 0xed, 0xec, 0xdc, 0x5a, 0x9c, 0x3c, 0x72, 0x1c, 0xae, 0x24, 0xce, 0x7d, 0x04, 0x0d, 0x83, - 0x51, 0xcb, 0x6f, 0xd7, 0x36, 0x67, 0xb7, 0x16, 0x77, 0xde, 0xd0, 0x4a, 0x02, 0x58, 0xcb, 0xef, - 0xae, 0xb7, 0x24, 0x71, 0x1b, 0x0f, 0x38, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0xd6, 0x00, 0xee, 0x51, - 0xd7, 0x74, 0x26, 0x16, 0xb5, 0xd9, 0x35, 0x3c, 0xdd, 0x03, 0xa8, 0xfb, 0x2e, 0xd5, 0xe5, 0xd3, - 0xbd, 0x5a, 0x7a, 0x82, 0x78, 0x53, 0xc7, 0x2e, 0xd5, 0xe3, 0x47, 0xe3, 0x5f, 0x58, 0x40, 0xa0, - 0x4f, 0x60, 0xce, 0x67, 0x84, 0x8d, 0x7c, 0xf1, 0x64, 0x8b, 0x3b, 0xaf, 0x55, 0x01, 0x13, 0x06, - 0xbd, 0x96, 0x84, 0x9b, 0x0b, 0xbe, 0xb1, 0x04, 0x52, 0xff, 0x3e, 0x0b, 0x2b, 0xb1, 0xf2, 0x9e, - 0x63, 0x0f, 0x0c, 0xc6, 0x43, 0xfa, 0x2e, 0xd4, 0xd9, 0xc4, 0xa5, 0xe2, 0x4e, 0x16, 0x7a, 0xaf, - 0x86, 0x9b, 0x39, 0x99, 0xb8, 0xf4, 0xd9, 0xf9, 0xc6, 0x5a, 0x81, 0x09, 0x17, 0x61, 0x61, 0x84, - 0xf6, 0xa3, 0x7d, 0xd6, 0x84, 0xf9, 0x3b, 0x69, 0xe7, 0xcf, 0xce, 0x37, 0x0a, 0x0a, 0x88, 0x16, - 0x21, 0xa5, 0xb7, 0x88, 0x5e, 0x81, 0x39, 0x8f, 0x12, 0xdf, 0xb1, 0xdb, 0x75, 0x81, 0x16, 0x1d, - 0x05, 0x8b, 0x55, 0x2c, 0xa5, 0xe8, 0x35, 0x98, 0xb7, 0xa8, 0xef, 0x93, 0x21, 0x6d, 0x37, 0x84, - 0xe2, 0xb2, 0x54, 0x9c, 0x3f, 0x08, 0x96, 0x71, 0x28, 0x47, 0x5f, 0x40, 0xcb, 0x24, 0x3e, 0x3b, - 0x75, 0x07, 0x84, 0xd1, 0x13, 0xc3, 0xa2, 0xed, 0x39, 0x71, 0xa1, 0xaf, 0x57, 0x7b, 0x7b, 0x6e, - 0xd1, 0xbb, 0x25, 0xd1, 0x5b, 0xfb, 0x29, 0x24, 0x9c, 0x41, 0x46, 0x63, 0x40, 0x7c, 0xe5, 0xc4, - 0x23, 0xb6, 0x1f, 0x5c, 0x14, 0xf7, 0x37, 0x7f, 0x65, 0x7f, 0xeb, 0xd2, 0x1f, 0xda, 0xcf, 0xa1, - 0xe1, 0x02, 0x0f, 0xea, 0x1f, 0x15, 0x68, 0xc5, 0xcf, 0x74, 0x0d, 0xb9, 0xfa, 0x51, 0x3a, 0x57, - 0xbf, 0x5f, 0x21, 0x38, 0x4b, 0x72, 0xf4, 0x9f, 0x35, 0x40, 0xb1, 0x12, 0x76, 0x4c, 0xb3, 0x4f, - 0xf4, 0x33, 0xb4, 0x09, 0x75, 0x9b, 0x58, 0x61, 0x4c, 0x46, 0x09, 0xf2, 0x13, 0x62, 0x51, 0x2c, - 0x24, 0xe8, 0x4b, 0x05, 0xd0, 0x48, 0x5c, 0xfd, 0x60, 0xd7, 0xb6, 0x1d, 0x46, 0xf8, 0x6d, 0x84, - 0x1b, 0xda, 0xab, 0xb0, 0xa1, 0xd0, 0x97, 0x76, 0x9a, 0x43, 0xb9, 0x6f, 0x33, 0x6f, 0x12, 0xbf, - 0x42, 0x5e, 0x01, 0x17, 0xb8, 0x46, 0x3f, 0x07, 0xf0, 0x24, 0xe6, 0x89, 0x23, 0xd3, 0xb6, 0xbc, - 0x06, 0x84, 0xee, 0xf7, 0x1c, 0xfb, 0xa1, 0x31, 0x8c, 0x0b, 0x0b, 0x8e, 0x20, 0x70, 0x02, 0x6e, - 0xfd, 0x3e, 0xac, 0x95, 0xec, 0x13, 0xdd, 0x80, 0xd9, 0x33, 0x3a, 0x09, 0xae, 0x0a, 0xf3, 0x9f, - 0x68, 0x15, 0x1a, 0x63, 0x62, 0x8e, 0x68, 0x90, 0x93, 0x38, 0xf8, 0xb8, 0x53, 0x7b, 0x4f, 0x51, - 0xff, 0xd0, 0x48, 0x46, 0x0a, 0xaf, 0x37, 0x68, 0x8b, 0xb7, 
0x07, 0xd7, 0x34, 0x74, 0xe2, 0x0b, - 0x8c, 0x46, 0xef, 0x85, 0xa0, 0x35, 0x04, 0x6b, 0x38, 0x92, 0xa2, 0x5f, 0x42, 0xd3, 0xa7, 0x26, - 0xd5, 0x99, 0xe3, 0xc9, 0x12, 0xf7, 0x76, 0xc5, 0x98, 0x22, 0x7d, 0x6a, 0x1e, 0x4b, 0xd3, 0x00, - 0x3e, 0xfc, 0xc2, 0x11, 0x24, 0xfa, 0x04, 0x9a, 0x8c, 0x5a, 0xae, 0x49, 0x18, 0x95, 0xb7, 0x97, - 0x8a, 0x2b, 0x5e, 0x3b, 0x38, 0xd8, 0x91, 0x33, 0x38, 0x91, 0x6a, 0xa2, 0x7a, 0x46, 0x71, 0x1a, - 0xae, 0xe2, 0x08, 0x06, 0xfd, 0x0c, 0x9a, 0x3e, 0xe3, 0x5d, 0x7d, 0x38, 0x11, 0x15, 0xe5, 0xa2, - 0xb6, 0x92, 0xac, 0xa3, 0x81, 0x49, 0x0c, 0x1d, 0xae, 0xe0, 0x08, 0x0e, 0xed, 0xc2, 0xb2, 0x65, - 0xd8, 0x98, 0x92, 0xc1, 0xe4, 0x98, 0xea, 0x8e, 0x3d, 0xf0, 0x45, 0x29, 0x6a, 0xf4, 0xd6, 0xa4, - 0xd1, 0xf2, 0x41, 0x5a, 0x8c, 0xb3, 0xfa, 0x68, 0x1f, 0x56, 0xc3, 0xb6, 0xfb, 0x91, 0xe1, 0x33, - 0xc7, 0x9b, 0xec, 0x1b, 0x96, 0xc1, 0x44, 0x81, 0x6a, 0xf4, 0xda, 0xd3, 0xf3, 0x8d, 0x55, 0x5c, - 0x20, 0xc7, 0x85, 0x56, 0xbc, 0x76, 0xba, 0x64, 0xe4, 0xd3, 0x81, 0x28, 0x38, 0xcd, 0xb8, 0x76, - 0x1e, 0x89, 0x55, 0x2c, 0xa5, 0xe8, 0xa7, 0xa9, 0x30, 0x6d, 0x5e, 0x2d, 0x4c, 0x5b, 0xe5, 0x21, - 0x8a, 0x4e, 0x61, 0xcd, 0xf5, 0x9c, 0xa1, 0x47, 0x7d, 0xff, 0x1e, 0x25, 0x03, 0xd3, 0xb0, 0x69, - 0x78, 0x33, 0x0b, 0xe2, 0x44, 0x2f, 0x4d, 0xcf, 0x37, 0xd6, 0x8e, 0x8a, 0x55, 0x70, 0x99, 0xad, - 0xfa, 0x97, 0x3a, 0xdc, 0xc8, 0xf6, 0x38, 0xf4, 0x31, 0x20, 0xa7, 0xef, 0x53, 0x6f, 0x4c, 0x07, - 0x1f, 0x06, 0x83, 0x1b, 0x9f, 0x6e, 0x14, 0x31, 0xdd, 0x44, 0x79, 0x7b, 0x98, 0xd3, 0xc0, 0x05, - 0x56, 0xc1, 0x7c, 0x24, 0x13, 0xa0, 0x26, 0x36, 0x9a, 0x98, 0x8f, 0x72, 0x49, 0xb0, 0x0b, 0xcb, - 0x32, 0xf7, 0x43, 0xa1, 0x08, 0xd6, 0xc4, 0xbb, 0x9f, 0xa6, 0xc5, 0x38, 0xab, 0x8f, 0x3e, 0x84, - 0x9b, 0x64, 0x4c, 0x0c, 0x93, 0xf4, 0x4d, 0x1a, 0x81, 0xd4, 0x05, 0xc8, 0x8b, 0x12, 0xe4, 0xe6, - 0x6e, 0x56, 0x01, 0xe7, 0x6d, 0xd0, 0x01, 0xac, 0x8c, 0xec, 0x3c, 0x54, 0x10, 0x87, 0x2f, 0x49, - 0xa8, 0x95, 0xd3, 0xbc, 0x0a, 0x2e, 0xb2, 0x43, 0x9f, 0x03, 0xe8, 0x61, 0x63, 0xf6, 0xdb, 0x73, - 0xa2, 0x92, 0xbe, 0x59, 0x21, 0x5f, 0xa2, 0x6e, 0x1e, 0x57, 0xb1, 0x68, 0xc9, 0xc7, 0x09, 0x4c, - 0x74, 0x17, 0x96, 0x3c, 0x9e, 0x01, 0xd1, 0x56, 0xe7, 0xc5, 0x56, 0xbf, 0x23, 0xcd, 0x96, 0x70, - 0x52, 0x88, 0xd3, 0xba, 0xe8, 0x0e, 0xb4, 0x74, 0xc7, 0x34, 0x45, 0xe4, 0xef, 0x39, 0x23, 0x9b, - 0x89, 0xe0, 0x6d, 0xf4, 0x10, 0xef, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, - 0x6d, 0x26, 0x4c, 0x67, 0x74, 0x27, 0x35, 0xfa, 0xbc, 0x92, 0x19, 0x7d, 0x6e, 0xe5, 0x2d, 0x12, - 0x93, 0x8f, 0x01, 0x4b, 0x3c, 0xf8, 0x0d, 0x7b, 0x18, 0x3c, 0xb8, 0x2c, 0x89, 0x6f, 0x5d, 0x98, - 0x4a, 0x91, 0x76, 0xa2, 0x31, 0xde, 0x14, 0x27, 0x4f, 0x0a, 0x71, 0x1a, 0x59, 0xfd, 0x00, 0x5a, - 0xe9, 0x3c, 0x4c, 0xcd, 0xf4, 0xca, 0xa5, 0x33, 0xfd, 0x37, 0x0a, 0xac, 0x95, 0x78, 0x47, 0x26, - 0xb4, 0x2c, 0xf2, 0x38, 0x11, 0x23, 0x97, 0xce, 0xc6, 0x9c, 0x35, 0x69, 0x01, 0x6b, 0xd2, 0x1e, - 0xd8, 0xec, 0xd0, 0x3b, 0x66, 0x9e, 0x61, 0x0f, 0x83, 0x77, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, - 0xfa, 0x0c, 0x9a, 0x16, 0x79, 0x7c, 0x3c, 0xf2, 0x86, 0x45, 0xf7, 0x55, 0xcd, 0x8f, 0xe8, 0x1f, - 0x07, 0x12, 0x05, 0x47, 0x78, 0xea, 0x21, 0x6c, 0xa6, 0x0e, 0xc9, 0x4b, 0x05, 0x7d, 0x38, 0x32, - 0x8f, 0x69, 0xfc, 0xe0, 0x6f, 0xc0, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xe5, 0xa2, 0xd1, 0x5b, 0x9a, - 0x9e, 0x6f, 0x2c, 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0x71, 0xac, 0x13, 0x93, - 0x5e, 0x03, 0x75, 0xb8, 0x97, 0xa2, 0x0e, 0x6a, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, - 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 
0x55, 0x49, - 0xe5, 0xb2, 0x2a, 0xa9, 0xfe, 0xbe, 0x06, 0x8b, 0x09, 0x17, 0x57, 0xb3, 0xe6, 0xd7, 0x9d, 0x18, - 0x34, 0x78, 0x19, 0xda, 0xa9, 0x72, 0x10, 0x2d, 0x1c, 0x2a, 0x82, 0xf9, 0x2d, 0xee, 0xde, 0xf9, - 0x59, 0xe3, 0x03, 0x68, 0x31, 0xe2, 0x0d, 0x29, 0x0b, 0x65, 0xe2, 0xc2, 0x16, 0xe2, 0x49, 0xff, - 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, - 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, - 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x57, 0x0d, 0x56, 0x13, - 0xda, 0x31, 0x37, 0xfd, 0x61, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, - 0xd3, 0x62, 0x76, 0x37, 0xfb, 0xbc, 0xd9, 0xdd, 0x73, 0x20, 0xc5, 0xea, 0x9f, 0x14, 0x58, 0x4e, - 0xdc, 0xdd, 0x35, 0x30, 0xc6, 0x07, 0x69, 0xc6, 0xf8, 0x72, 0x95, 0xa0, 0x29, 0xa1, 0x8c, 0x7f, - 0x6d, 0xa4, 0x36, 0xff, 0xad, 0x27, 0x31, 0xbf, 0x86, 0xd5, 0xb1, 0x63, 0x8e, 0x2c, 0xba, 0x67, - 0x12, 0xc3, 0x0a, 0x15, 0xf8, 0xc4, 0x38, 0x9b, 0xfd, 0x63, 0x28, 0x82, 0xa7, 0x9e, 0x6f, 0xf8, - 0x8c, 0xda, 0xec, 0xd3, 0xd8, 0xb2, 0xf7, 0x5d, 0xe9, 0x64, 0xf5, 0xd3, 0x02, 0x38, 0x5c, 0xe8, - 0x04, 0xfd, 0x00, 0x16, 0xf9, 0xc0, 0x6c, 0xe8, 0x94, 0x73, 0x6f, 0x19, 0x58, 0x2b, 0x12, 0x68, - 0xf1, 0x38, 0x16, 0xe1, 0xa4, 0x1e, 0x7a, 0x04, 0x2b, 0xae, 0x33, 0x38, 0x20, 0x36, 0x19, 0x52, - 0x3e, 0x66, 0x1c, 0x39, 0xa6, 0xa1, 0x4f, 0x04, 0xb3, 0x59, 0xe8, 0xbd, 0x1b, 0x4e, 0xa6, 0x47, - 0x79, 0x95, 0x67, 0x9c, 0x22, 0xe4, 0x97, 0x45, 0x52, 0x17, 0x41, 0x22, 0x0f, 0x5a, 0x23, 0xd9, - 0xee, 0x25, 0xd1, 0x0b, 0xfe, 0x6f, 0xd9, 0xa9, 0x12, 0x61, 0xa7, 0x29, 0xcb, 0xb8, 0xfa, 0xa7, - 0xd7, 0x71, 0xc6, 0x43, 0x29, 0x71, 0x6b, 0xfe, 0x3f, 0xc4, 0x4d, 0xfd, 0x77, 0x1d, 0x6e, 0xe6, - 0x4a, 0x25, 0xfa, 0xf1, 0x05, 0x0c, 0xe7, 0xd6, 0x73, 0x63, 0x37, 0xb9, 0x01, 0x7d, 0xf6, 0x0a, - 0x03, 0xfa, 0x2e, 0x2c, 0xeb, 0x23, 0xcf, 0xa3, 0x36, 0xcb, 0xb0, 0x9a, 0x88, 0x1a, 0xed, 0xa5, - 0xc5, 0x38, 0xab, 0x5f, 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, - 0xbb, 0xfc, 0x2e, 0xe4, 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, - 0x83, 0xd3, 0x94, 0x14, 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x84, - 0x81, 0xc8, 0xf1, 0xed, 0x2a, 0xb1, 0x5c, 0x99, 0x85, 0xa9, 0x7f, 0x53, 0xe0, 0xc5, 0xd2, 0x24, - 0x40, 0xbb, 0xa9, 0x96, 0xbb, 0x9d, 0x69, 0xb9, 0xdf, 0x2b, 0x35, 0x4c, 0xf4, 0x5d, 0xaf, 0x98, - 0x1a, 0xbd, 0x5f, 0x8d, 0x1a, 0x15, 0xcc, 0xed, 0x97, 0x73, 0xa4, 0xde, 0xf6, 0x93, 0xa7, 0x9d, - 0x99, 0xaf, 0x9e, 0x76, 0x66, 0xbe, 0x7e, 0xda, 0x99, 0xf9, 0xcd, 0xb4, 0xa3, 0x3c, 0x99, 0x76, - 0x94, 0xaf, 0xa6, 0x1d, 0xe5, 0xeb, 0x69, 0x47, 0xf9, 0xc7, 0xb4, 0xa3, 0xfc, 0xee, 0x9b, 0xce, - 0xcc, 0x67, 0xf3, 0xd2, 0xe3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x89, 0x29, 0x5c, 0x61, - 0x1b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 6f41f06b..694f6570 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -43,7 +43,7 @@ option go_package = "v1beta1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -56,7 +56,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -143,6 +143,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready @@ -262,7 +263,7 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -276,15 +277,15 @@ message RollingUpdateStatefulSetStrategy { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index d462604d..b77fcf7a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -55,22 +55,20 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient -// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. 
type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -113,7 +111,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement = "Parallel" + ParallelPodManagement PodManagementPolicyType = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -136,13 +134,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -323,7 +321,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -434,7 +433,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. 
// +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -542,7 +541,7 @@ type DeploymentList struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -559,7 +558,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 68ebef34..504b8586 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -149,7 +149,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). 
Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { @@ -167,9 +167,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index 93892bfd..fb276124 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -137,7 +137,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -470,7 +470,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go index e93e164e..9f499869 100644 --- a/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta2 // import "k8s.io/api/apps/v1beta2" diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 72d832c3..624bb942 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -14,66 +14,29 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto -// DO NOT EDIT! -/* - Package v1beta2 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1beta2 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -86,136 +49,874 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { 
+ xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{3} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func 
(*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func 
(*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_42fe616264472f7e, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_42fe616264472f7e, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) } -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{22} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_Scale proto.InternalMessageInfo -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{23} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{24} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{25} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{26} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{27} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) 
{ + return fileDescriptor_42fe616264472f7e, []int{28} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{29} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} + return fileDescriptor_42fe616264472f7e, []int{30} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) } +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") @@ -242,6 +943,7 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") @@ -249,10 +951,155 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") 
proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_42fe616264472f7e) +} + +var fileDescriptor_42fe616264472f7e = []byte{ + // 2171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, + 0xf9, 0xd6, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0xf4, 0x93, 0x36, 0xf2, 0xaf, 0x2b, 0x63, + 0x13, 0x38, 0x4a, 0x6c, 0xcd, 0xda, 0xca, 0x07, 0x12, 0xbb, 0x68, 0xab, 0x95, 0x52, 0xdb, 0x81, + 0xbe, 0x42, 0x59, 0x06, 0x1a, 0xb4, 0xa8, 0xa9, 0x5d, 0x7a, 0x35, 0xd1, 0x7c, 0x61, 0x86, 0xb3, + 0xf5, 0xa2, 0x97, 0x9e, 0x0a, 0x14, 0x28, 0xd0, 0xf6, 0xda, 0x7f, 0xa2, 0xb7, 0xa2, 0x68, 0x6f, + 0x45, 0x50, 0xf8, 0x52, 0x20, 0xe8, 0x25, 0x39, 0x09, 0xf5, 0xe6, 0x54, 0x14, 0xbd, 0x14, 0xe8, + 0x25, 0x40, 0x81, 0x82, 0x1c, 0xce, 0x07, 0xe7, 0xc3, 0x3b, 0x52, 0x1c, 0xa5, 0x29, 0x72, 0xd3, + 0x92, 0xcf, 0xfb, 0xf0, 0x7d, 0xc9, 0x97, 0x7c, 0x1f, 0x72, 0x04, 0xbe, 0x73, 0xfc, 0x96, 0xab, + 0x6a, 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, + 0x25, 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x43, 0x42, 0xf1, 0x5a, 0xab, + 0x47, 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, + 0xa6, 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, + 0x3d, 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc4, 0x7f, 0xf1, 0x1f, 0xfc, 0x2f, 0x9f, 0x67, + 0xa9, 0x19, 0x1b, 0xb0, 0x63, 0x39, 0xa4, 0xd5, 0xbf, 0x99, 0x1c, 0x6b, 0xe9, 0xf5, 0x08, 0x63, + 0xe0, 0xce, 0x91, 0x66, 0x12, 0x67, 0xd0, 0xb2, 0x8f, 0x7b, 0xac, 0xc1, 0x6d, 0x19, 0x84, 0xe2, + 0x2c, 0xab, 0x56, 0x9e, 0x95, 0xe3, 0x99, 0x54, 0x33, 0x48, 0xca, 0xe0, 0xcd, 0x51, 0x06, 0x6e, + 0xe7, 0x88, 0x18, 0x38, 0x65, 0xf7, 0x5a, 0x9e, 0x9d, 0x47, 0x35, 0xbd, 0xa5, 0x99, 0xd4, 0xa5, + 0x4e, 0xd2, 0xa8, 0xf9, 0x2f, 0x05, 0xc0, 0x0d, 0xcb, 0xa4, 0x8e, 0xa5, 0xeb, 0xc4, 0x41, 0xa4, + 0xaf, 0xb9, 0x9a, 0x65, 0xc2, 0x87, 0xa0, 0xc6, 0xe2, 0xe9, 0x62, 0x8a, 0xeb, 0xca, 0x15, 0x65, + 0x65, 0x6a, 0xed, 0x86, 0x1a, 0xcd, 0x74, 0x48, 0xaf, 0xda, 0xc7, 0x3d, 0xd6, 0xe0, 0xaa, 0x0c, + 0xad, 0xf6, 0x6f, 0xaa, 0xbb, 0x87, 0x1f, 0x90, 0x0e, 0xdd, 0x26, 0x14, 0xb7, 0xe1, 0x93, 0x93, + 0xe5, 0xb1, 0xe1, 0xc9, 0x32, 0x88, 0xda, 0x50, 0xc8, 0x0a, 0x77, 0x41, 0x85, 0xb3, 0x97, 0x38, + 0xfb, 0x6a, 0x2e, 0xbb, 0x08, 0x5a, 0x45, 0xf8, 0x47, 0xef, 0x3c, 0xa6, 0xc4, 0x64, 0xee, 0xb5, + 0x2f, 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x1d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, + 0x7c, 0x45, 0x59, 0x29, 0xb7, 0x2f, 0x0a, 0x54, 0x2d, 0x08, 0x0b, 0x85, 0x88, 0xe6, 0x13, 0x05, + 0x2c, 0xa4, 0xe3, 0xde, 0xd2, 0x5c, 0x0a, 0xbf, 0x9f, 0x8a, 0x5d, 0x2d, 0x16, 0x3b, 0xb3, 0xe6, + 0x91, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0xb8, 0xf7, 0x40, 0x55, 0xa3, 0xc4, 0x70, 0xeb, 0xa5, 0x2b, + 0xe5, 0x95, 0xa9, 0xb5, 0x6b, 0x6a, 0x4e, 0x02, 0xab, 0x69, 0xef, 0xda, 0xd3, 0x82, 0xb7, 0x7a, + 0x8f, 0x31, 0x20, 0x9f, 0xa8, 0xf9, 0xb3, 0x12, 0x98, 0xdc, 0xc4, 0xc4, 0xb0, 0xcc, 0x7d, 0x42, + 0xcf, 0x61, 0xe5, 0xee, 0x82, 0x8a, 0x6b, 0x93, 0x8e, 0x58, 0xb9, 0xab, 0xb9, 0x01, 0x84, 0x3e, + 0xed, 0xdb, 0xa4, 0x13, 0x2d, 0x19, 0xfb, 0x85, 0x38, 0x03, 0xdc, 0x03, 0xe3, 0x2e, 0xc5, 0xd4, + 0x73, 0xf9, 0x82, 0x4d, 0xad, 0xad, 0x14, 0xe0, 0xe2, 0xf8, 0xf6, 0x8c, 0x60, 0x1b, 0xf7, 0x7f, + 0x23, 0xc1, 0xd3, 0xfc, 0x5b, 0x09, 0xc0, 0x10, 0xbb, 0x61, 0x99, 
0x5d, 0x8d, 0xb2, 0x74, 0xbe, + 0x05, 0x2a, 0x74, 0x60, 0x13, 0x3e, 0x21, 0x93, 0xed, 0xab, 0x81, 0x2b, 0xf7, 0x07, 0x36, 0xf9, + 0xec, 0x64, 0x79, 0x21, 0x6d, 0xc1, 0x7a, 0x10, 0xb7, 0x81, 0x5b, 0xa1, 0x93, 0x25, 0x6e, 0xfd, + 0xba, 0x3c, 0xf4, 0x67, 0x27, 0xcb, 0x19, 0x67, 0x87, 0x1a, 0x32, 0xc9, 0x0e, 0xc2, 0x3e, 0x80, + 0x3a, 0x76, 0xe9, 0x7d, 0x07, 0x9b, 0xae, 0x3f, 0x92, 0x66, 0x10, 0x11, 0xfe, 0xab, 0xc5, 0x16, + 0x8a, 0x59, 0xb4, 0x97, 0x84, 0x17, 0x70, 0x2b, 0xc5, 0x86, 0x32, 0x46, 0x80, 0x57, 0xc1, 0xb8, + 0x43, 0xb0, 0x6b, 0x99, 0xf5, 0x0a, 0x8f, 0x22, 0x9c, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0xaf, + 0x80, 0x09, 0x83, 0xb8, 0x2e, 0xee, 0x91, 0x7a, 0x95, 0x03, 0x67, 0x05, 0x70, 0x62, 0xdb, 0x6f, + 0x46, 0x41, 0x7f, 0xf3, 0xb7, 0x0a, 0x98, 0x0e, 0x67, 0xee, 0x1c, 0x76, 0xce, 0x1d, 0x79, 0xe7, + 0x34, 0x47, 0x27, 0x4b, 0xce, 0x86, 0xf9, 0xb0, 0x1c, 0x73, 0x9c, 0xa5, 0x23, 0xfc, 0x01, 0xa8, + 0xb9, 0x44, 0x27, 0x1d, 0x6a, 0x39, 0xc2, 0xf1, 0xd7, 0x0a, 0x3a, 0x8e, 0x0f, 0x89, 0xbe, 0x2f, + 0x4c, 0xdb, 0x17, 0x98, 0xe7, 0xc1, 0x2f, 0x14, 0x52, 0xc2, 0xf7, 0x40, 0x8d, 0x12, 0xc3, 0xd6, + 0x31, 0x25, 0x62, 0xd7, 0xbc, 0x18, 0x77, 0x9e, 0xe5, 0x0c, 0x23, 0xdb, 0xb3, 0xba, 0xf7, 0x05, + 0x8c, 0x6f, 0x99, 0x70, 0x32, 0x82, 0x56, 0x14, 0xd2, 0x40, 0x1b, 0xcc, 0x78, 0x76, 0x97, 0x21, + 0x29, 0x3b, 0xce, 0x7b, 0x03, 0x91, 0x43, 0x37, 0x46, 0xcf, 0xca, 0x81, 0x64, 0xd7, 0x5e, 0x10, + 0xa3, 0xcc, 0xc8, 0xed, 0x28, 0xc1, 0x0f, 0xd7, 0xc1, 0xac, 0xa1, 0x99, 0x88, 0xe0, 0xee, 0x60, + 0x9f, 0x74, 0x2c, 0xb3, 0xeb, 0xf2, 0x54, 0xaa, 0xb6, 0x17, 0x05, 0xc1, 0xec, 0xb6, 0xdc, 0x8d, + 0x92, 0x78, 0xb8, 0x05, 0xe6, 0x83, 0x03, 0xf8, 0xae, 0xe6, 0x52, 0xcb, 0x19, 0x6c, 0x69, 0x86, + 0x46, 0xeb, 0xe3, 0x9c, 0xa7, 0x3e, 0x3c, 0x59, 0x9e, 0x47, 0x19, 0xfd, 0x28, 0xd3, 0xaa, 0xf9, + 0xab, 0x71, 0x30, 0x9b, 0x38, 0x17, 0xe0, 0x03, 0xb0, 0xd0, 0xf1, 0x1c, 0x87, 0x98, 0x74, 0xc7, + 0x33, 0x0e, 0x89, 0xb3, 0xdf, 0x39, 0x22, 0x5d, 0x4f, 0x27, 0x5d, 0xbe, 0xac, 0xd5, 0x76, 0x43, + 0xf8, 0xba, 0xb0, 0x91, 0x89, 0x42, 0x39, 0xd6, 0xf0, 0x5d, 0x00, 0x4d, 0xde, 0xb4, 0xad, 0xb9, + 0x6e, 0xc8, 0x59, 0xe2, 0x9c, 0xe1, 0x56, 0xdc, 0x49, 0x21, 0x50, 0x86, 0x15, 0xf3, 0xb1, 0x4b, + 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f, 0x65, 0xd9, 0xc7, 0xcd, 0x4c, 0x14, 0xca, 0xb1, 0x86, 0x6f, + 0x80, 0x29, 0x7f, 0x34, 0x3e, 0xe7, 0x62, 0x71, 0xe6, 0x04, 0xd9, 0xd4, 0x4e, 0xd4, 0x85, 0xe2, + 0x38, 0x16, 0x9a, 0x75, 0xe8, 0x12, 0xa7, 0x4f, 0xba, 0x77, 0x7c, 0x71, 0xc0, 0x2a, 0x68, 0x95, + 0x57, 0xd0, 0x30, 0xb4, 0xdd, 0x14, 0x02, 0x65, 0x58, 0xb1, 0xd0, 0xfc, 0xac, 0x49, 0x85, 0x36, + 0x2e, 0x87, 0x76, 0x90, 0x89, 0x42, 0x39, 0xd6, 0x2c, 0xf7, 0x7c, 0x97, 0xd7, 0xfb, 0x58, 0xd3, + 0xf1, 0xa1, 0x4e, 0xea, 0x13, 0x72, 0xee, 0xed, 0xc8, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x25, + 0xbf, 0xe9, 0xc0, 0xc4, 0x21, 0x49, 0x8d, 0x93, 0xbc, 0x20, 0x48, 0x2e, 0xed, 0x24, 0x01, 0x28, + 0x6d, 0x03, 0x6f, 0x81, 0x99, 0x8e, 0xa5, 0xeb, 0x3c, 0x1f, 0x37, 0x2c, 0xcf, 0xa4, 0xf5, 0x49, + 0xce, 0x02, 0xd9, 0x1e, 0xda, 0x90, 0x7a, 0x50, 0x02, 0x09, 0x7f, 0x08, 0x40, 0x27, 0x28, 0x0c, + 0x6e, 0x1d, 0x8c, 0x50, 0x00, 0xe9, 0xb2, 0x14, 0x55, 0xe6, 0xb0, 0xc9, 0x45, 0x31, 0xca, 0xe6, + 0x87, 0x0a, 0x58, 0xcc, 0xd9, 0xe8, 0xf0, 0xdb, 0x52, 0x11, 0xbc, 0x96, 0x28, 0x82, 0x97, 0x73, + 0xcc, 0x62, 0x95, 0xf0, 0x08, 0x4c, 0x33, 0x41, 0xa2, 0x99, 0x3d, 0x1f, 0x22, 0xce, 0xb2, 0x56, + 0x6e, 0x00, 0x28, 0x8e, 0x8e, 0x4e, 0xe5, 0x4b, 0xc3, 0x93, 0xe5, 0x69, 0xa9, 0x0f, 0xc9, 0xc4, + 0xcd, 0x9f, 0x97, 0x00, 0xd8, 0x24, 0xb6, 0x6e, 0x0d, 0x0c, 0x62, 0x9e, 0x87, 0xa6, 0xb9, 
0x27, + 0x69, 0x9a, 0x97, 0xf3, 0x97, 0x24, 0x74, 0x2a, 0x57, 0xd4, 0xbc, 0x97, 0x10, 0x35, 0xaf, 0x14, + 0x21, 0x7b, 0xb6, 0xaa, 0xf9, 0xb8, 0x0c, 0xe6, 0x22, 0x70, 0x24, 0x6b, 0x6e, 0x4b, 0x2b, 0xfa, + 0x72, 0x62, 0x45, 0x17, 0x33, 0x4c, 0xbe, 0x30, 0x5d, 0xf3, 0x01, 0x98, 0x61, 0xaa, 0xc3, 0x5f, + 0x3f, 0xae, 0x69, 0xc6, 0x4f, 0xad, 0x69, 0xc2, 0x4a, 0xb4, 0x25, 0x31, 0xa1, 0x04, 0x73, 0x8e, + 0x86, 0x9a, 0xf8, 0x2a, 0x6a, 0xa8, 0xdf, 0x29, 0x60, 0x26, 0x5a, 0xa6, 0x73, 0x10, 0x51, 0x77, + 0x65, 0x11, 0xf5, 0x62, 0x81, 0xe4, 0xcc, 0x51, 0x51, 0x1f, 0x57, 0xe2, 0xae, 0x73, 0x19, 0xb5, + 0xc2, 0xae, 0x60, 0xb6, 0xae, 0x75, 0xb0, 0x2b, 0xea, 0xed, 0x05, 0xff, 0xfa, 0xe5, 0xb7, 0xa1, + 0xb0, 0x57, 0x12, 0x5c, 0xa5, 0x2f, 0x56, 0x70, 0x95, 0x9f, 0x8f, 0xe0, 0xfa, 0x1e, 0xa8, 0xb9, + 0x81, 0xd4, 0xaa, 0x70, 0xca, 0x6b, 0x85, 0x36, 0xb6, 0x50, 0x59, 0x21, 0x75, 0xa8, 0xaf, 0x42, + 0xba, 0x2c, 0x65, 0x55, 0xfd, 0x32, 0x95, 0x15, 0x4b, 0x74, 0x1b, 0x7b, 0x2e, 0xe9, 0xf2, 0x4d, + 0x55, 0x8b, 0x12, 0x7d, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, 0xed, 0x58, 0x3d, 0x87, + 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x10, 0x80, 0x5f, 0x13, 0x2f, 0x0f, 0x4f, 0x96, + 0x17, 0xf7, 0xb2, 0x21, 0x28, 0xcf, 0xb6, 0xf9, 0xc7, 0x0a, 0xb8, 0x98, 0x3c, 0x1b, 0x73, 0x64, + 0x8a, 0x72, 0x26, 0x99, 0x72, 0x3d, 0x96, 0xa7, 0xbe, 0x86, 0x8b, 0x3d, 0x15, 0xa4, 0x72, 0x75, + 0x1d, 0xcc, 0x0a, 0x59, 0x12, 0x74, 0x0a, 0xa1, 0x16, 0x2e, 0xcf, 0x81, 0xdc, 0x8d, 0x92, 0x78, + 0x78, 0x1b, 0x4c, 0x3b, 0x5c, 0x79, 0x05, 0x04, 0xbe, 0x7a, 0xf9, 0x3f, 0x41, 0x30, 0x8d, 0xe2, + 0x9d, 0x48, 0xc6, 0x32, 0xe5, 0x12, 0x09, 0x92, 0x80, 0xa0, 0x22, 0x2b, 0x97, 0xf5, 0x24, 0x00, + 0xa5, 0x6d, 0xe0, 0x36, 0x98, 0xf3, 0xcc, 0x34, 0x95, 0x9f, 0x6b, 0x97, 0x05, 0xd5, 0xdc, 0x41, + 0x1a, 0x82, 0xb2, 0xec, 0xe0, 0x43, 0x49, 0xcc, 0x8c, 0xf3, 0xf3, 0xe4, 0x7a, 0x81, 0x3d, 0x51, + 0x58, 0xcd, 0x64, 0x48, 0xad, 0x5a, 0x51, 0xa9, 0xd5, 0xfc, 0x83, 0x02, 0x60, 0x7a, 0x1f, 0x8e, + 0x7c, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, + 0x5a, 0x4c, 0x00, 0x89, 0x89, 0x3e, 0x9f, 0x47, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, + 0xa0, 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0xdf, 0x4b, 0x60, 0x2e, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, + 0x5f, 0x3f, 0xec, 0x14, 0x13, 0x25, 0xd1, 0xd4, 0xfd, 0x37, 0x89, 0x92, 0xc8, 0xab, 0x1c, 0x51, + 0xf2, 0x9b, 0x52, 0xdc, 0xf5, 0x53, 0x8a, 0x92, 0xe7, 0xf0, 0xc2, 0xf1, 0x95, 0xd3, 0x35, 0xcd, + 0x3f, 0x95, 0xc1, 0xc5, 0xe4, 0x3e, 0x94, 0x0a, 0xa4, 0x32, 0xb2, 0x40, 0xee, 0x81, 0xf9, 0x47, + 0x9e, 0xae, 0x0f, 0x78, 0x0c, 0xb1, 0x2a, 0xe9, 0x97, 0xd6, 0xff, 0x17, 0x96, 0xf3, 0xdf, 0xcd, + 0xc0, 0xa0, 0x4c, 0xcb, 0x74, 0xbd, 0xac, 0x7c, 0xde, 0x7a, 0x59, 0x3d, 0x43, 0xbd, 0xcc, 0x96, + 0x1c, 0xe5, 0x33, 0x49, 0x8e, 0xd3, 0x15, 0xcb, 0x8c, 0x83, 0x6b, 0xe4, 0xd5, 0xff, 0xa7, 0x0a, + 0x58, 0xc8, 0xbe, 0x70, 0x43, 0x1d, 0xcc, 0x18, 0xf8, 0x71, 0xfc, 0xe1, 0x63, 0x54, 0x11, 0xf1, + 0xa8, 0xa6, 0xab, 0xfe, 0x27, 0x23, 0xf5, 0x9e, 0x49, 0x77, 0x9d, 0x7d, 0xea, 0x68, 0x66, 0xcf, + 0xaf, 0xbc, 0xdb, 0x12, 0x17, 0x4a, 0x70, 0x37, 0x3f, 0x55, 0xc0, 0x62, 0x4e, 0xe5, 0x3b, 0x5f, + 0x4f, 0xe0, 0xfb, 0xa0, 0x66, 0xe0, 0xc7, 0xfb, 0x9e, 0xd3, 0xcb, 0xaa, 0xd5, 0xc5, 0xc6, 0xe1, + 0x5b, 0x71, 0x5b, 0xb0, 0xa0, 0x90, 0xaf, 0xb9, 0x0b, 0xae, 0x48, 0x41, 0xb2, 0x9d, 0x43, 0x1e, + 0x79, 0x3a, 0xdf, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x93, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, + 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x6f, 0x05, 0x54, 0xf7, + 0x3b, 0x58, 0x27, 
0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, 0xff, 0x92, 0xce, 0xfd, 0xc9, 0x2d, + 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xd2, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0xb7, 0xc1, 0x64, 0x38, 0xdc, + 0xe9, 0x0e, 0xa0, 0xe6, 0xaf, 0x4b, 0x60, 0x2a, 0x36, 0xc4, 0x29, 0x8f, 0xaf, 0x87, 0xd2, 0x99, + 0xcd, 0x36, 0xe6, 0x5a, 0x91, 0x40, 0xd4, 0xe0, 0x7c, 0x7e, 0xc7, 0xa4, 0x4e, 0xfc, 0x82, 0x97, + 0x3e, 0xb6, 0xbf, 0x05, 0x66, 0x28, 0x76, 0x7a, 0x84, 0x06, 0x7d, 0x7c, 0xc2, 0x26, 0xa3, 0x07, + 0x8f, 0xfb, 0x52, 0x2f, 0x4a, 0xa0, 0x97, 0x6e, 0x83, 0x69, 0x69, 0x30, 0x78, 0x11, 0x94, 0x8f, + 0xc9, 0xc0, 0x97, 0x3d, 0x88, 0xfd, 0x09, 0xe7, 0x41, 0xb5, 0x8f, 0x75, 0xcf, 0xcf, 0xf3, 0x49, + 0xe4, 0xff, 0xb8, 0x55, 0x7a, 0x4b, 0x69, 0xfe, 0x82, 0x4d, 0x4e, 0x94, 0x9c, 0xe7, 0x90, 0x5d, + 0xef, 0x4a, 0xd9, 0x95, 0xff, 0x51, 0x2f, 0xbe, 0x65, 0xf2, 0x72, 0x0c, 0x25, 0x72, 0xec, 0xd5, + 0x42, 0x6c, 0xcf, 0xce, 0xb4, 0x7f, 0x94, 0xc0, 0x7c, 0x0c, 0x1d, 0xc9, 0xc9, 0x6f, 0x4a, 0x72, + 0x72, 0x25, 0x21, 0x27, 0xeb, 0x59, 0x36, 0x5f, 0xeb, 0xc9, 0xd1, 0x7a, 0xf2, 0xf7, 0x0a, 0x98, + 0x8d, 0xcd, 0xdd, 0x39, 0x08, 0xca, 0x7b, 0xb2, 0xa0, 0x7c, 0xa9, 0x48, 0xd2, 0xe4, 0x28, 0xca, + 0x3f, 0x57, 0x25, 0xe7, 0xff, 0xe7, 0xdf, 0xb9, 0x7e, 0x0c, 0xe6, 0xfb, 0x96, 0xee, 0x19, 0x64, + 0x43, 0xc7, 0x9a, 0x11, 0x00, 0x98, 0x02, 0x2b, 0x27, 0xef, 0x72, 0x21, 0x3d, 0x71, 0x5c, 0xcd, + 0xa5, 0xc4, 0xa4, 0x0f, 0x22, 0xcb, 0x48, 0xf7, 0x3d, 0xc8, 0xa0, 0x43, 0x99, 0x83, 0xc0, 0x37, + 0xc0, 0x14, 0x53, 0x4e, 0x5a, 0x87, 0xec, 0x60, 0x23, 0x48, 0xac, 0xf0, 0x13, 0xd6, 0x7e, 0xd4, + 0x85, 0xe2, 0x38, 0x78, 0x04, 0xe6, 0x6c, 0xab, 0xbb, 0x8d, 0x4d, 0xdc, 0x23, 0x4c, 0x66, 0xec, + 0x59, 0xba, 0xd6, 0x19, 0xf0, 0xc7, 0xaf, 0xc9, 0xf6, 0x9b, 0xc1, 0xc3, 0xc6, 0x5e, 0x1a, 0xc2, + 0x2e, 0x89, 0x19, 0xcd, 0x7c, 0x53, 0x67, 0x51, 0x42, 0x27, 0xf5, 0xd9, 0xd5, 0x7f, 0x76, 0x5e, + 0x2b, 0x92, 0x61, 0x67, 0xfc, 0xf0, 0x9a, 0xf7, 0xb6, 0x57, 0x3b, 0xd3, 0x57, 0xd3, 0x7f, 0x56, + 0xc0, 0xa5, 0xd4, 0x51, 0xf9, 0x25, 0xbe, 0xae, 0xa5, 0xa4, 0x7e, 0xf9, 0x14, 0x52, 0x7f, 0x1d, + 0xcc, 0x8a, 0x0f, 0xb6, 0x89, 0x9b, 0x42, 0x78, 0x63, 0xdb, 0x90, 0xbb, 0x51, 0x12, 0x9f, 0xf5, + 0xba, 0x57, 0x3d, 0xe5, 0xeb, 0x5e, 0xdc, 0x0b, 0xf1, 0x0f, 0x48, 0x7e, 0xea, 0xa5, 0xbd, 0x10, + 0xff, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, 0xb2, 0x42, 0x38, 0x90, 0x7a, + 0x51, 0x02, 0xfd, 0xb9, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, 0x45, 0xf2, 0xb9, 0xf8, 0xdd, + 0xe4, 0x2f, 0x0a, 0x78, 0x21, 0x77, 0x23, 0xc0, 0x75, 0xa9, 0xec, 0xae, 0x26, 0xca, 0xee, 0x37, + 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0x7e, 0x9a, 0x7b, 0xbb, 0xd8, 0xd3, 0x5c, 0x86, 0x76, 0x1f, + 0xfd, 0x46, 0xd7, 0x5e, 0x7d, 0xf2, 0xb4, 0x31, 0xf6, 0xd1, 0xd3, 0xc6, 0xd8, 0x27, 0x4f, 0x1b, + 0x63, 0x3f, 0x19, 0x36, 0x94, 0x27, 0xc3, 0x86, 0xf2, 0xd1, 0xb0, 0xa1, 0x7c, 0x32, 0x6c, 0x28, + 0x7f, 0x1d, 0x36, 0x94, 0x5f, 0x7e, 0xda, 0x18, 0x7b, 0x7f, 0x42, 0x8c, 0xf8, 0x9f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x70, 0x2d, 0x26, 0x9d, 0xec, 0x28, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -260,36 +1107,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i 
var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -297,37 +1153,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -335,41 +1200,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := 
m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -377,41 +1253,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -419,37 +1306,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 + _ = l if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -457,51 +1353,62 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -509,58 +1416,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -568,31 +1482,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -600,41 +1522,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -642,49 +1575,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, 
size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -692,37 +1638,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -730,122 +1685,140 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n19 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - 
if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -853,31 +1826,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -885,41 +1866,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -927,41 +1919,52 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], 
m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -969,37 +1972,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1007,43 +2019,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + 
size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1051,44 +2072,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1096,27 +2124,34 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1124,37 +2159,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1162,22 +2206,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1185,41 +2234,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1227,20 +2287,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1248,46 +2313,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1295,41 +2368,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n36 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1337,41 +2421,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1379,37 +2474,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n40, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1417,73 +2521,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n41, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n41 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n42, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n42 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } 
+ i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1491,57 +2610,66 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func 
(m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1549,55 +2677,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n44 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1609,6 +2732,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1623,6 +2749,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1635,6 +2764,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1651,6 +2783,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1665,6 +2800,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -1683,6 +2821,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -1706,6 +2847,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1718,6 +2862,9 @@ func (m 
*DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1730,6 +2877,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1748,6 +2898,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1762,6 +2915,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1787,6 +2943,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1808,6 +2967,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1820,6 +2982,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1832,6 +2997,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1848,6 +3016,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1862,6 +3033,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1878,6 +3052,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1895,6 +3072,9 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1905,6 +3085,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1919,6 +3102,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1928,6 +3114,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1940,6 +3129,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1947,6 +3139,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1964,6 +3159,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1976,6 +3174,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1992,6 +3193,9 @@ func (m *StatefulSetCondition) Size() (n 
int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2006,6 +3210,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -2036,6 +3243,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -2060,6 +3270,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2072,14 +3285,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2089,8 +3295,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -2100,9 +3306,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2112,7 +3323,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2126,7 +3337,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + 
fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2137,9 +3348,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2149,8 +3365,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2162,6 +3378,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -2172,7 +3393,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2183,7 +3404,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := 
strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -2193,7 +3414,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2209,8 +3430,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2219,9 +3440,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2232,8 +3458,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + 
strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2247,13 +3473,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2266,7 +3497,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2276,7 +3507,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2290,7 +3521,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2301,9 +3532,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := 
strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2314,8 +3550,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2325,13 +3561,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2341,7 +3582,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2351,8 +3592,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2372,7 +3613,7 @@ func (this *Scale) String() string { 
return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2416,7 +3657,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2430,7 +3671,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2441,9 +3682,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2452,11 +3698,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - 
`VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2469,6 +3720,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2478,7 +3734,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2489,7 +3745,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2517,7 +3773,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2545,7 +3801,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2554,6 +3810,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2575,7 +3834,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2584,6 +3843,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2605,7 +3867,7 @@ func (m 
*ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2619,6 +3881,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2646,7 +3911,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2674,7 +3939,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2683,6 +3948,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2704,7 +3972,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2713,6 +3981,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2730,6 +4001,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2757,7 +4031,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2785,7 +4059,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2794,6 +4068,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2815,7 +4092,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2824,6 +4101,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2845,7 +4125,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2854,6 +4134,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2870,6 +4153,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -2897,7 +4183,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2925,7 +4211,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2935,6 +4221,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2954,7 +4243,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2964,6 +4253,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2983,7 +4275,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2992,6 +4284,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3013,7 +4308,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3023,6 +4318,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3042,7 +4340,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3052,6 +4350,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3066,6 +4367,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3093,7 +4397,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3121,7 +4425,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3130,6 +4434,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3151,7 +4458,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3160,6 +4467,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3177,6 +4487,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3204,7 +4517,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3232,7 +4545,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3241,11 +4554,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3265,7 +4581,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3274,6 +4590,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3295,7 +4614,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3304,6 +4623,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3325,7 +4647,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3344,7 +4666,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3359,6 +4681,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3386,7 +4711,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3414,7 +4739,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3433,7 +4758,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3452,7 +4777,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3471,7 +4796,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3490,7 +4815,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3509,7 +4834,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3528,7 +4853,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3547,7 +4872,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3566,7 +4891,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3586,7 +4911,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3595,6 +4920,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3612,6 +4940,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3639,7 +4970,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3667,7 +4998,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3677,6 +5008,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3696,7 +5030,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3705,6 +5039,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-3724,6 +5061,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3751,7 +5091,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3779,7 +5119,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3788,6 +5128,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3809,7 +5152,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3818,6 +5161,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3839,7 +5185,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3848,6 +5194,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3864,6 +5213,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3891,7 +5243,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3919,7 +5271,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3929,6 +5281,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3948,7 +5303,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3958,6 +5313,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3977,7 +5335,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3987,6 +5345,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4006,7 +5367,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4016,6 +5377,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4035,7 +5399,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4044,6 +5408,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4065,7 +5432,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4074,6 +5441,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4090,6 +5460,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4117,7 +5490,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4145,7 +5518,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4154,6 +5527,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4175,7 +5551,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4184,6 +5560,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4201,6 +5580,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4228,7 +5610,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4256,7 +5638,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4276,7 +5658,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4285,11 +5667,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4309,7 +5694,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4318,6 +5703,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4339,7 +5727,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4348,6 +5736,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4369,7 +5760,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4388,7 +5779,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4408,7 +5799,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4428,7 +5819,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4443,6 +5834,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4470,7 +5864,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4498,7 +5892,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4517,7 +5911,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4536,7 +5930,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4555,7 +5949,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas 
|= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4574,7 +5968,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4593,7 +5987,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4602,6 +5996,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4624,7 +6021,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4643,7 +6040,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4658,6 +6055,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4685,7 +6085,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4713,7 +6113,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4723,6 +6123,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4742,7 +6145,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4751,6 +6154,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4770,6 +6176,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4797,7 +6206,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4825,7 +6234,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4834,6 +6243,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4855,7 +6267,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4864,6 +6276,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4885,7 +6300,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4894,6 +6309,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4910,6 +6328,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4937,7 +6358,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4965,7 +6386,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4975,6 +6396,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4994,7 +6418,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5004,6 +6428,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5023,7 +6450,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5032,6 +6459,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5053,7 +6483,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5063,6 +6493,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5082,7 +6515,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5092,6 +6525,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5106,6 
+6542,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5133,7 +6572,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5161,7 +6600,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5170,6 +6609,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5191,7 +6633,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5200,6 +6642,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5217,6 +6662,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5244,7 +6692,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5272,7 +6720,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5292,7 +6740,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5301,11 +6749,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5325,7 +6776,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5334,6 +6785,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5355,7 +6809,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5369,6 +6823,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } 
@@ -5396,7 +6853,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5424,7 +6881,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5443,7 +6900,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5462,7 +6919,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5481,7 +6938,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5500,7 +6957,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5519,7 +6976,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5528,6 +6985,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5545,6 +7005,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5572,7 +7035,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5600,7 +7063,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5609,11 +7072,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5628,6 +7094,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5655,7 +7124,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5683,7 +7152,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5692,11 +7161,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5716,7 +7188,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5725,11 +7197,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5744,6 +7219,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5771,7 +7249,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5799,7 +7277,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5814,6 +7292,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5841,7 +7322,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5869,7 +7350,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5878,6 +7359,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5899,7 +7383,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5908,6 +7392,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5929,7 +7416,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5938,6 +7425,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5954,6 +7444,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5981,7 +7474,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6009,7 +7502,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6023,6 +7516,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6050,7 +7546,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6078,7 +7574,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6097,7 +7593,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6106,54 +7602,20 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6163,41 +7625,86 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -6213,7 +7720,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6223,6 +7730,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6237,6 +7747,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6264,7 +7777,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6292,7 +7805,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6301,6 +7814,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6322,7 +7838,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6331,6 +7847,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6352,7 +7871,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6361,6 +7880,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6377,6 +7899,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6404,7 +7929,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6432,7 +7957,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6442,6 +7967,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6461,7 +7989,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6471,6 +7999,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6490,7 +8021,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6499,6 +8030,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6520,7 +8054,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6530,6 +8064,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6549,7 +8086,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6559,6 +8096,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -6573,6 +8113,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6600,7 +8143,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6628,7 +8171,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6637,6 +8180,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6658,7 +8204,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6667,6 +8213,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6684,6 +8233,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6711,7 +8263,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6739,7 +8291,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6759,7 +8311,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6768,11 +8320,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6792,7 +8347,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6801,6 +8356,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6822,7 +8380,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6831,10 +8389,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6853,7 +8414,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6863,6 +8424,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6882,7 +8446,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6892,6 +8456,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6911,7 +8478,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6920,6 +8487,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6941,7 +8511,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6956,6 +8526,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6983,7 +8556,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7011,7 +8584,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7030,7 +8603,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7049,7 +8622,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7068,7 +8641,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7087,7 +8660,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7106,7 +8679,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7116,6 +8689,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7135,7 +8711,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7145,6 +8721,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7164,7 +8743,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7184,7 +8763,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7193,6 +8772,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7210,6 +8792,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7237,7 +8822,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7265,7 +8850,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7275,6 +8860,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7294,7 +8882,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7303,6 +8891,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7322,6 +8913,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7388,10 +8982,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -7420,6 +9017,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } 
iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -7438,147 +9038,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2176 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, - 0x19, 0xd7, 0xec, 0x43, 0x5a, 0x51, 0x91, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, - 0x1c, 0x25, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, - 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, - 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, - 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, - 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, - 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xfc, 0xb6, 0xab, 0x6a, - 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, - 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0x8d, 0x43, 0x42, 0xf1, 0x5a, 0xab, 0x47, - 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, - 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, - 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc8, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x35, - 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x92, 0x63, 0x2d, 0xbd, 0x11, 0x61, 0x0c, 0xdc, - 0x39, 0xd2, 0x4c, 0xe2, 0x0c, 0x5a, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2d, 0x83, 0x50, 0x9c, 0x65, - 0xd5, 0xca, 0xb3, 0x72, 0x3c, 0x93, 0x6a, 0x06, 0x49, 0x19, 0xbc, 0x35, 0xca, 0xc0, 0xed, 0x1c, - 0x11, 0x03, 0xa7, 0xec, 0x5e, 0xcf, 0xb3, 0xf3, 0xa8, 0xa6, 0xb7, 0x34, 0x93, 0xba, 0xd4, 0x49, - 0x1a, 0x35, 0xff, 0xa3, 0x00, 0xb8, 0x61, 0x99, 0xd4, 0xb1, 0x74, 0x9d, 0x38, 0x88, 0xf4, 0x35, - 0x57, 0xb3, 0x4c, 0xf8, 0x00, 0xd4, 0xd8, 0x7c, 0xba, 0x98, 0xe2, 0xba, 0x72, 0x59, 0x59, 0x99, - 0x5a, 0xbb, 0xae, 0x46, 0x2b, 0x1d, 0xd2, 0xab, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2a, 0x43, 0xab, - 0xfd, 0x1b, 0xea, 0xee, 0xe1, 0x87, 0xa4, 0x43, 0xb7, 0x09, 0xc5, 0x6d, 0xf8, 0xf8, 0x64, 0x79, - 0x6c, 0x78, 0xb2, 0x0c, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x5d, 0x50, 0xe1, 0xec, 0x25, 0xce, 0xbe, - 0x9a, 0xcb, 0x2e, 0x26, 0xad, 0x22, 0xfc, 0x93, 0x77, 0x1f, 0x51, 0x62, 0x32, 0xf7, 0xda, 0x2f, - 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x0d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, 0x7c, - 0x59, 0x59, 0x29, 0xb7, 0x2f, 0x08, 0x54, 0x2d, 0x98, 0x16, 0x0a, 0x11, 0xcd, 0xc7, 0x0a, 0x58, - 0x48, 0xcf, 0x7b, 0x4b, 0x73, 0x29, 0xfc, 0x61, 0x6a, 0xee, 0x6a, 0xb1, 0xb9, 0x33, 0x6b, 0x3e, - 0xf3, 0x70, 0xe0, 0xa0, 0x25, 0x36, 0xef, 0x3d, 0x50, 0xd5, 0x28, 0x31, 0xdc, 0x7a, 0xe9, 0x72, - 0x79, 0x65, 0x6a, 0xed, 0xaa, 0x9a, 0x13, 0xc0, 0x6a, 0xda, 0xbb, 0xf6, 0xb4, 0xe0, 0xad, 0xde, - 0x65, 0x0c, 0xc8, 0x27, 0x6a, 0xfe, 0xa2, 0x04, 0x26, 0x37, 0x31, 0x31, 0x2c, 0x73, 0x9f, 0xd0, - 0x73, 0xd8, 0xb9, 0x3b, 0xa0, 0xe2, 0xda, 0xa4, 0x23, 0x76, 0xee, 0x4a, 0xee, 0x04, 0x42, 0x9f, - 0xf6, 0x6d, 0xd2, 0x89, 0xb6, 0x8c, 0x3d, 0x21, 0xce, 0x00, 0xf7, 0xc0, 0xb8, 0x4b, 0x31, 
0xf5, - 0x5c, 0xbe, 0x61, 0x53, 0x6b, 0x2b, 0x05, 0xb8, 0x38, 0xbe, 0x3d, 0x23, 0xd8, 0xc6, 0xfd, 0x67, - 0x24, 0x78, 0x9a, 0xff, 0x28, 0x01, 0x18, 0x62, 0x37, 0x2c, 0xb3, 0xab, 0x51, 0x16, 0xce, 0x37, - 0x41, 0x85, 0x0e, 0x6c, 0xc2, 0x17, 0x64, 0xb2, 0x7d, 0x25, 0x70, 0xe5, 0xde, 0xc0, 0x26, 0x9f, - 0x9f, 0x2c, 0x2f, 0xa4, 0x2d, 0x58, 0x0f, 0xe2, 0x36, 0x70, 0x2b, 0x74, 0xb2, 0xc4, 0xad, 0xdf, - 0x90, 0x87, 0xfe, 0xfc, 0x64, 0x39, 0xe3, 0xec, 0x50, 0x43, 0x26, 0xd9, 0x41, 0xd8, 0x07, 0x50, - 0xc7, 0x2e, 0xbd, 0xe7, 0x60, 0xd3, 0xf5, 0x47, 0xd2, 0x0c, 0x22, 0xa6, 0xff, 0x5a, 0xb1, 0x8d, - 0x62, 0x16, 0xed, 0x25, 0xe1, 0x05, 0xdc, 0x4a, 0xb1, 0xa1, 0x8c, 0x11, 0xe0, 0x15, 0x30, 0xee, - 0x10, 0xec, 0x5a, 0x66, 0xbd, 0xc2, 0x67, 0x11, 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0x57, - 0xc1, 0x84, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xbd, 0xca, 0x81, 0xb3, 0x02, 0x38, 0xb1, 0xed, 0x37, - 0xa3, 0xa0, 0xbf, 0xf9, 0x7b, 0x05, 0x4c, 0x87, 0x2b, 0x77, 0x0e, 0x99, 0x73, 0x5b, 0xce, 0x9c, - 0xe6, 0xe8, 0x60, 0xc9, 0x49, 0x98, 0x8f, 0xca, 0x31, 0xc7, 0x59, 0x38, 0xc2, 0x1f, 0x81, 0x9a, - 0x4b, 0x74, 0xd2, 0xa1, 0x96, 0x23, 0x1c, 0x7f, 0xbd, 0xa0, 0xe3, 0xf8, 0x90, 0xe8, 0xfb, 0xc2, - 0xb4, 0xfd, 0x02, 0xf3, 0x3c, 0x78, 0x42, 0x21, 0x25, 0x7c, 0x1f, 0xd4, 0x28, 0x31, 0x6c, 0x1d, - 0x53, 0x22, 0xb2, 0xe6, 0xa5, 0xb8, 0xf3, 0x2c, 0x66, 0x18, 0xd9, 0x9e, 0xd5, 0xbd, 0x27, 0x60, - 0x3c, 0x65, 0xc2, 0xc5, 0x08, 0x5a, 0x51, 0x48, 0x03, 0x6d, 0x30, 0xe3, 0xd9, 0x5d, 0x86, 0xa4, - 0xec, 0x38, 0xef, 0x0d, 0x44, 0x0c, 0x5d, 0x1f, 0xbd, 0x2a, 0x07, 0x92, 0x5d, 0x7b, 0x41, 0x8c, - 0x32, 0x23, 0xb7, 0xa3, 0x04, 0x3f, 0x5c, 0x07, 0xb3, 0x86, 0x66, 0x22, 0x82, 0xbb, 0x83, 0x7d, - 0xd2, 0xb1, 0xcc, 0xae, 0xcb, 0x43, 0xa9, 0xda, 0x5e, 0x14, 0x04, 0xb3, 0xdb, 0x72, 0x37, 0x4a, - 0xe2, 0xe1, 0x16, 0x98, 0x0f, 0x0e, 0xe0, 0x3b, 0x9a, 0x4b, 0x2d, 0x67, 0xb0, 0xa5, 0x19, 0x1a, - 0xad, 0x8f, 0x73, 0x9e, 0xfa, 0xf0, 0x64, 0x79, 0x1e, 0x65, 0xf4, 0xa3, 0x4c, 0xab, 0xe6, 0x6f, - 0xc6, 0xc1, 0x6c, 0xe2, 0x5c, 0x80, 0xf7, 0xc1, 0x42, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, - 0x38, 0x24, 0xce, 0x7e, 0xe7, 0x88, 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0xe1, - 0xeb, 0xc2, 0x46, 0x26, 0x0a, 0xe5, 0x58, 0xc3, 0xf7, 0x00, 0x34, 0x79, 0xd3, 0xb6, 0xe6, 0xba, - 0x21, 0x67, 0x89, 0x73, 0x86, 0xa9, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2b, 0xe6, 0x63, 0x97, 0xb8, - 0x9a, 0x43, 0xba, 0x49, 0x1f, 0xcb, 0xb2, 0x8f, 0x9b, 0x99, 0x28, 0x94, 0x63, 0x0d, 0xdf, 0x04, - 0x53, 0xfe, 0x68, 0x7c, 0xcd, 0xc5, 0xe6, 0xcc, 0x09, 0xb2, 0xa9, 0x9d, 0xa8, 0x0b, 0xc5, 0x71, - 0x6c, 0x6a, 0xd6, 0xa1, 0x4b, 0x9c, 0x3e, 0xe9, 0xde, 0xf6, 0xc5, 0x01, 0xab, 0xa0, 0x55, 0x5e, - 0x41, 0xc3, 0xa9, 0xed, 0xa6, 0x10, 0x28, 0xc3, 0x8a, 0x4d, 0xcd, 0x8f, 0x9a, 0xd4, 0xd4, 0xc6, - 0xe5, 0xa9, 0x1d, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xf5, 0x3e, 0xd6, 0x74, - 0x7c, 0xa8, 0x93, 0xfa, 0x84, 0x1c, 0x7b, 0x3b, 0x72, 0x37, 0x4a, 0xe2, 0xe1, 0x6d, 0x70, 0xd1, - 0x6f, 0x3a, 0x30, 0x71, 0x48, 0x52, 0xe3, 0x24, 0x2f, 0x0a, 0x92, 0x8b, 0x3b, 0x49, 0x00, 0x4a, - 0xdb, 0xc0, 0x9b, 0x60, 0xa6, 0x63, 0xe9, 0x3a, 0x8f, 0xc7, 0x0d, 0xcb, 0x33, 0x69, 0x7d, 0x92, - 0xb3, 0x40, 0x96, 0x43, 0x1b, 0x52, 0x0f, 0x4a, 0x20, 0xe1, 0x8f, 0x01, 0xe8, 0x04, 0x85, 0xc1, - 0xad, 0x83, 0x11, 0x0a, 0x20, 0x5d, 0x96, 0xa2, 0xca, 0x1c, 0x36, 0xb9, 0x28, 0x46, 0xd9, 0xfc, - 0x48, 0x01, 0x8b, 0x39, 0x89, 0x0e, 0xbf, 0x2b, 0x15, 0xc1, 0xab, 0x89, 0x22, 0x78, 0x29, 0xc7, - 0x2c, 0x56, 0x09, 0x8f, 0xc0, 0x34, 0x13, 0x24, 0x9a, 0xd9, 0xf3, 0x21, 0xe2, 0x2c, 0x6b, 0xe5, - 0x4e, 0x00, 0xc5, 
0xd1, 0xd1, 0xa9, 0x7c, 0x71, 0x78, 0xb2, 0x3c, 0x2d, 0xf5, 0x21, 0x99, 0xb8, - 0xf9, 0xcb, 0x12, 0x00, 0x9b, 0xc4, 0xd6, 0xad, 0x81, 0x41, 0xcc, 0xf3, 0xd0, 0x34, 0x77, 0x25, - 0x4d, 0xf3, 0x4a, 0xfe, 0x96, 0x84, 0x4e, 0xe5, 0x8a, 0x9a, 0xf7, 0x13, 0xa2, 0xe6, 0xd5, 0x22, - 0x64, 0x4f, 0x57, 0x35, 0x9f, 0x94, 0xc1, 0x5c, 0x04, 0x8e, 0x64, 0xcd, 0x2d, 0x69, 0x47, 0x5f, - 0x49, 0xec, 0xe8, 0x62, 0x86, 0xc9, 0x73, 0xd3, 0x35, 0xcf, 0x5e, 0x5f, 0xc0, 0x0f, 0xc1, 0x0c, - 0x13, 0x32, 0x7e, 0x48, 0x70, 0x99, 0x34, 0x7e, 0x6a, 0x99, 0x14, 0x16, 0xb7, 0x2d, 0x89, 0x09, - 0x25, 0x98, 0x73, 0x64, 0xd9, 0xc4, 0xf3, 0x96, 0x65, 0xcd, 0x3f, 0x28, 0x60, 0x26, 0xda, 0xa6, - 0x73, 0x10, 0x51, 0x77, 0x64, 0x11, 0xf5, 0x52, 0x81, 0xe0, 0xcc, 0x51, 0x51, 0x9f, 0x54, 0xe2, - 0xae, 0x73, 0x19, 0xb5, 0xc2, 0x5e, 0xc1, 0x6c, 0x5d, 0xeb, 0x60, 0x57, 0xd4, 0xdb, 0x17, 0xfc, - 0xd7, 0x2f, 0xbf, 0x0d, 0x85, 0xbd, 0x92, 0xe0, 0x2a, 0x3d, 0x5f, 0xc1, 0x55, 0x7e, 0x36, 0x82, - 0xeb, 0x07, 0xa0, 0xe6, 0x06, 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x16, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, - 0xea, 0x50, 0x5f, 0x85, 0x74, 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, - 0x5c, 0xd2, 0xe5, 0x19, 0x50, 0x8b, 0x92, 0x79, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, - 0xed, 0x58, 0x3d, 0x87, 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, - 0x5e, 0x1a, 0x9e, 0x2c, 0x2f, 0xee, 0x65, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, - 0x79, 0x36, 0xe6, 0xc8, 0x14, 0xe5, 0x4c, 0x32, 0xe5, 0x5a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, - 0x2a, 0x48, 0xc5, 0xea, 0x3a, 0x98, 0x15, 0xb2, 0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x03, - 0xb9, 0x1b, 0x25, 0xf1, 0x4c, 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x3d, 0x09, - 0x40, 0x69, 0x1b, 0xb8, 0x0d, 0xe6, 0x3c, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0x77, - 0x90, 0x86, 0xa0, 0x2c, 0x3b, 0xf8, 0x40, 0xd2, 0x23, 0xe3, 0xfc, 0x48, 0xb8, 0x56, 0x20, 0xac, - 0x0b, 0x0b, 0x12, 0x78, 0x0b, 0x4c, 0x3b, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, - 0x6c, 0x1a, 0xc5, 0x3b, 0x91, 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, - 0xc0, 0x74, 0x1e, 0x8e, 0xbc, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0xae, 0x17, - 0xd4, 0x3f, 0xd1, 0x81, 0x5a, 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9f, 0x4b, 0x9d, 0xa2, 0x02, 0x28, - 0x72, 0xea, 0x19, 0x08, 0xa0, 0x18, 0xd9, 0xd3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x2e, 0x02, 0x17, - 0x16, 0x40, 0x19, 0x26, 0x5f, 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, - 0x27, 0x51, 0x12, 0x79, 0x95, 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, - 0xfc, 0x4e, 0xa6, 0xf9, 0x97, 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, - 0x1e, 0x98, 0x7f, 0xe8, 0xe9, 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, - 0x72, 0xfe, 0xfb, 0x19, 0x18, 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0x67, 0x2a, 0xf6, 0xa9, 0x0a, - 0x54, 0x39, 0x45, 0x05, 0xca, 0x2c, 0xdc, 0xd5, 0x33, 0x14, 0xee, 0xd3, 0x55, 0xda, 0x8c, 0x83, - 0x6b, 0xe4, 0xab, 0xff, 0xcf, 0x15, 0xb0, 0x90, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x63, 0xe0, 0x47, - 0xf1, 0x8b, 0x8f, 0x51, 0x45, 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0xbb, 0x26, 0xdd, - 0x75, 0xf6, 0xa9, 0xa3, 0x99, 0x3d, 0xbf, 0xf2, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, - 0x01, 0x8b, 0x39, 0x95, 0xef, 0x7c, 0x3d, 0x81, 0x1f, 0x80, 0x9a, 0x81, 0x1f, 0xed, 0x7b, 0x4e, - 0x2f, 0xab, 0x56, 0x17, 0x1b, 0x87, 0x67, 0xf3, 0xb6, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x17, 0x5c, - 0x96, 0x26, 0xc9, 0x32, 0x87, 0x3c, 0xf4, 
0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x05, 0x93, 0x36, - 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, - 0xbf, 0xf9, 0x5f, 0x05, 0x54, 0xf7, 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, - 0x7f, 0x93, 0xce, 0xfd, 0xc9, 0x2d, 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xf2, 0x08, 0x9e, 0xa7, 0xd7, - 0xf8, 0x77, 0xc0, 0x64, 0x38, 0xdc, 0xe9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x2a, 0x36, 0xc4, - 0x29, 0x8f, 0xaf, 0x07, 0xd2, 0xb1, 0xcf, 0x12, 0x73, 0xad, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, - 0xd7, 0xa4, 0x4e, 0xfc, 0x05, 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0xcc, 0x50, 0xec, 0xf4, 0x08, 0x0d, - 0xfa, 0xf8, 0x82, 0x4d, 0x46, 0xb7, 0x13, 0xf7, 0xa4, 0x5e, 0x94, 0x40, 0x2f, 0xdd, 0x02, 0xd3, - 0xd2, 0x60, 0xf0, 0x02, 0x28, 0x1f, 0x93, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x41, 0xb5, - 0x8f, 0x75, 0xcf, 0x8f, 0xf3, 0x49, 0xe4, 0x3f, 0xdc, 0x2c, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, - 0x27, 0x0a, 0xce, 0x73, 0x88, 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, - 0x18, 0x43, 0x89, 0x18, 0x7b, 0xad, 0x10, 0xdb, 0xd3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x1f, 0x43, - 0x47, 0x72, 0xf2, 0xdb, 0x92, 0x9c, 0x5c, 0x49, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, - 0xb4, 0x9e, 0xfc, 0xa3, 0x02, 0x66, 0x63, 0x6b, 0x77, 0x0e, 0x82, 0xf2, 0xae, 0x2c, 0x28, 0x5f, - 0x2e, 0x12, 0x34, 0x39, 0x8a, 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, - 0xef, 0x5b, 0xba, 0x67, 0x90, 0x0d, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, - 0x0b, 0xe9, 0x89, 0xe3, 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x1f, 0x59, 0x46, 0xba, 0xef, 0x7e, 0x06, - 0x1d, 0xca, 0x1c, 0x04, 0xbe, 0x09, 0xa6, 0x98, 0x7e, 0xd3, 0x3a, 0x64, 0x07, 0x1b, 0x41, 0x60, - 0x85, 0x9f, 0xb0, 0xf6, 0xa3, 0x2e, 0x14, 0xc7, 0xc1, 0x23, 0x30, 0x67, 0x5b, 0xdd, 0x6d, 0x6c, - 0xe2, 0x1e, 0x61, 0x32, 0x63, 0xcf, 0xd2, 0xb5, 0xce, 0x80, 0x5f, 0x7e, 0x4d, 0xb6, 0xdf, 0x0a, - 0x6e, 0x45, 0xf6, 0xd2, 0x10, 0xf6, 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, - 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x5a, 0x91, 0x08, 0x3b, 0xe3, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, - 0x99, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, - 0xcb, 0xa7, 0x90, 0xe7, 0xeb, 0x60, 0x56, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x43, - 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, 0x5e, 0xf5, 0x94, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, - 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, - 0xb2, 0x42, 0x38, 0x90, 0x7a, 0x51, 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, - 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, 0xdf, 0x14, 0xf0, 0x62, 0x6e, 0x22, 0xc0, 0x75, 0xa9, 0xec, - 0xae, 0x26, 0xca, 0xee, 0xb7, 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, - 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, 0x47, 0xd7, 0x5e, 0x7d, 0xfc, 0xa4, 0x31, 0xf6, 0xf1, 0x93, - 0xc6, 0xd8, 0xa7, 0x4f, 0x1a, 0x63, 0x3f, 0x1b, 0x36, 0x94, 0xc7, 0xc3, 0x86, 0xf2, 0xf1, 0xb0, - 0xa1, 0x7c, 0x3a, 0x6c, 0x28, 0x7f, 0x1f, 0x36, 0x94, 0x5f, 0x7f, 0xd6, 0x18, 0xfb, 0x60, 0x42, - 0x8c, 0xf8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x85, 0x43, 0x0a, 0xec, 0x28, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index cc3656d2..17e43970 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -43,7 +43,7 @@ option go_package = "v1beta2"; // depend on its stability. 
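The regenerated v1beta2 Unmarshal and skipGenerated hunks above are mostly mechanical: the newer generator spells the accumulation as int(b&0x7F) << shift instead of (int(b) & 0x7F) << shift (same value, different spelling), and the substantive hardening is the added postIndex < 0 / iNdEx < 0 checks, which stop a corrupted varint length from wrapping the index arithmetic negative and slipping past the existing > l bounds test. A minimal sketch of that guard pattern, with hypothetical names rather than the vendored identifiers:

package sketch

import (
	"errors"
	"io"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling") // mirrors ErrInvalidLengthGenerated
	errIntOverflow   = errors.New("proto: integer overflow")                          // mirrors ErrIntOverflowGenerated
)

// readLength decodes a varint-encoded field length starting at data[idx] and
// returns the length plus the index just past the field body. The two "< 0"
// checks are the ones the regenerated code adds: the decoded length must not
// be negative, and idx+length must not overflow.
func readLength(data []byte, idx int) (length, end int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if idx >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[idx]
		idx++
		length |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if length < 0 {
		return 0, 0, errInvalidLength
	}
	end = idx + length
	if end < 0 { // overflow guard added by the regenerated code
		return 0, 0, errInvalidLength
	}
	if end > len(data) {
		return 0, 0, io.ErrUnexpectedEOF
	}
	return length, end, nil
}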
It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -56,7 +56,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -69,12 +69,12 @@ message ControllerRevisionList { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -82,7 +82,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -111,7 +111,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -286,6 +286,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready @@ -373,12 +374,12 @@ message DeploymentStrategy { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -386,7 +387,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -415,7 +416,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -526,7 +527,7 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -542,15 +543,15 @@ message RollingUpdateStatefulSetStrategy { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index e5525222..d358455f 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta2 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -57,22 +57,20 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient -// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. 
type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -117,7 +115,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement = "Parallel" + ParallelPodManagement PodManagementPolicyType = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -143,13 +141,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -331,7 +329,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -414,7 +413,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. 
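The rolling-update comments in this file (and the matching swagger strings below) describe a simple rounding rule: maxSurge percentages are converted to an absolute pod count by rounding up, maxUnavailable percentages by rounding down, both relative to the desired replica count. A small illustration of that arithmetic, as a hypothetical helper rather than anything in the vendored code:

package sketch

// absoluteSurgeAndUnavailable converts the two percentages into pod counts
// for a given desired replica count: maxSurge rounds up, maxUnavailable
// rounds down, matching the documented behavior.
func absoluteSurgeAndUnavailable(desired, surgePct, unavailPct int) (surge, unavailable int) {
	surge = (desired*surgePct + 99) / 100    // round up
	unavailable = desired * unavailPct / 100 // round down
	return surge, unavailable
}

With 10 desired replicas and both fields set to 30%, this yields surge=3 and unavailable=3: at most 13 pods exist and at least 7 stay available during the rollout, which is where the 130% and 70% figures in the comments come from.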
// +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -667,12 +666,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -680,7 +679,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -698,7 +697,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -717,12 +716,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -730,7 +729,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -741,7 +740,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -851,7 +850,7 @@ type ReplicaSetCondition struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -868,7 +867,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 627df3ab..822158a1 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -272,7 +272,7 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). 
This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { @@ -290,9 +290,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 8a0bad22..127bf095 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -720,7 +720,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go new file mode 100644 index 00000000..ae8f7671 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=auditregistration.k8s.io + +package v1alpha1 // import "k8s.io/api/auditregistration/v1alpha1" diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go new file mode 100644 index 00000000..003cc30b --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go @@ -0,0 +1,2056 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AuditSink) Reset() { *m = AuditSink{} } +func (*AuditSink) ProtoMessage() {} +func (*AuditSink) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{0} +} +func (m *AuditSink) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSink) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSink.Merge(m, src) +} +func (m *AuditSink) XXX_Size() int { + return m.Size() +} +func (m *AuditSink) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSink.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditSink proto.InternalMessageInfo + +func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } +func (*AuditSinkList) ProtoMessage() {} +func (*AuditSinkList) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{1} +} +func (m *AuditSinkList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkList.Merge(m, src) +} +func (m *AuditSinkList) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkList) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkList.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditSinkList proto.InternalMessageInfo + +func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } +func (*AuditSinkSpec) ProtoMessage() {} +func (*AuditSinkSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{2} +} +func (m *AuditSinkSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkSpec.Merge(m, src) +} +func (m 
*AuditSinkSpec) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkSpec) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditSinkSpec proto.InternalMessageInfo + +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{3} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{4} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{5} +} +func (m *Webhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Webhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Webhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_Webhook.Merge(m, src) +} +func (m *Webhook) XXX_Size() int { + return m.Size() +} +func (m *Webhook) XXX_DiscardUnknown() { + xxx_messageInfo_Webhook.DiscardUnknown(m) +} + +var xxx_messageInfo_Webhook proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{6} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func (m *WebhookThrottleConfig) Reset() { *m = 
WebhookThrottleConfig{} } +func (*WebhookThrottleConfig) ProtoMessage() {} +func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{7} +} +func (m *WebhookThrottleConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookThrottleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookThrottleConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookThrottleConfig.Merge(m, src) +} +func (m *WebhookThrottleConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookThrottleConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookThrottleConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookThrottleConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") + proto.RegisterType((*AuditSinkList)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkList") + proto.RegisterType((*AuditSinkSpec)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkSpec") + proto.RegisterType((*Policy)(nil), "k8s.io.api.auditregistration.v1alpha1.Policy") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.auditregistration.v1alpha1.ServiceReference") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.auditregistration.v1alpha1.Webhook") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") + proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptor_642d3597c6afa8ba) +} + +var fileDescriptor_642d3597c6afa8ba = []byte{ + // 765 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x13, 0x47, + 0x14, 0xf6, 0xc6, 0x76, 0x6c, 0x4f, 0x9c, 0x36, 0x9d, 0xb4, 0x95, 0x1b, 0x55, 0x6b, 0x6b, 0xa5, + 0x4a, 0x91, 0xda, 0xcc, 0x36, 0x55, 0xd4, 0x56, 0x88, 0x4b, 0x36, 0x27, 0xa4, 0x10, 0xc2, 0x98, + 0x80, 0x40, 0x08, 0x31, 0x5e, 0x3f, 0xef, 0x0e, 0xb6, 0x77, 0x97, 0xdd, 0x59, 0xa3, 0xdc, 0xf8, + 0x09, 0xfc, 0x05, 0xfe, 0x06, 0x37, 0x24, 0x90, 0x72, 0xcc, 0x31, 0xa7, 0x88, 0x98, 0x03, 0xff, + 0x81, 0x13, 0x9a, 0xd9, 0x59, 0xdb, 0xc4, 0x41, 0x38, 0xb7, 0x79, 0xdf, 0x7b, 0xdf, 0xf7, 0xbe, + 0xf7, 0xde, 0xa0, 0x83, 0xfe, 0xff, 0x09, 0xe1, 0xa1, 0xdd, 0x4f, 0x3b, 0x10, 0x07, 0x20, 0x20, + 0xb1, 0x47, 0x10, 0x74, 0xc3, 0xd8, 0xd6, 0x09, 0x16, 0x71, 0x9b, 0xa5, 0x5d, 0x2e, 0x62, 0xf0, + 0x78, 0x22, 0x62, 0x26, 0x78, 0x18, 0xd8, 0xa3, 0x6d, 0x36, 0x88, 0x7c, 0xb6, 0x6d, 0x7b, 0x10, + 0x40, 0xcc, 0x04, 0x74, 0x49, 0x14, 0x87, 0x22, 0xc4, 0x7f, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0xe6, + 0x68, 0x24, 0xa7, 0x6d, 0x6c, 0x79, 0x5c, 0xf8, 0x69, 0x87, 0xb8, 0xe1, 0xd0, 0xf6, 0x42, 0x2f, + 0xb4, 0x15, 0xbb, 0x93, 0xf6, 0x54, 0xa4, 0x02, 0xf5, 0xca, 0x54, 0x37, 0x76, 0xa6, 0x66, 0x86, + 0xcc, 0xf5, 0x79, 0x00, 0xf1, 0xb1, 0x1d, 0xf5, 0x3d, 0x09, 0x24, 0xf6, 0x10, 0x04, 0xb3, 0x47, + 0x73, 0x5e, 0x36, 0xec, 0x6f, 0xb1, 0xe2, 0x34, 0x10, 0x7c, 0x08, 0x73, 0x84, 0x7f, 0xbf, 0x47, + 0x48, 0x5c, 0x1f, 0x86, 0xec, 0x32, 0xcf, 0x7a, 0x6f, 0xa0, 0xda, 0xae, 0x1c, 0xb6, 0xcd, 0x83, + 0x3e, 0x7e, 0x8a, 0xaa, 0xd2, 0x51, 0x97, 0x09, 0xd6, 0x30, 0x5a, 0xc6, 0xe6, 0xca, 0x3f, 0x7f, + 0x93, 0xe9, 0x56, 0x26, 0xc2, 0x24, 0xea, 
0x7b, 0x12, 0x48, 0x88, 0xac, 0x26, 0xa3, 0x6d, 0x72, + 0xa7, 0xf3, 0x0c, 0x5c, 0x71, 0x1b, 0x04, 0x73, 0xf0, 0xc9, 0x79, 0xb3, 0x30, 0x3e, 0x6f, 0xa2, + 0x29, 0x46, 0x27, 0xaa, 0xf8, 0x3e, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x25, 0xa5, 0xbe, 0x43, 0x16, + 0xda, 0x39, 0x99, 0x38, 0x6c, 0x47, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x23, 0xaa, 0xf4, 0xac, + 0x77, 0x06, 0x5a, 0x9d, 0x54, 0xed, 0xf3, 0x44, 0xe0, 0xc7, 0x73, 0xb3, 0x90, 0xc5, 0x66, 0x91, + 0x6c, 0x35, 0xc9, 0x9a, 0xee, 0x53, 0xcd, 0x91, 0x99, 0x39, 0x8e, 0x50, 0x99, 0x0b, 0x18, 0x26, + 0x8d, 0xa5, 0x56, 0xf1, 0xd2, 0x9a, 0x16, 0x1a, 0xc4, 0x59, 0xd5, 0xe2, 0xe5, 0x5b, 0x52, 0x86, + 0x66, 0x6a, 0xd6, 0xdb, 0xd9, 0x31, 0xe4, 0x78, 0xf8, 0x08, 0x2d, 0x47, 0xe1, 0x80, 0xbb, 0xc7, + 0x7a, 0x88, 0xad, 0x05, 0x3b, 0x1d, 0x2a, 0x92, 0xf3, 0x83, 0x6e, 0xb3, 0x9c, 0xc5, 0x54, 0x8b, + 0xe1, 0x87, 0xa8, 0xf2, 0x02, 0x3a, 0x7e, 0x18, 0xf6, 0xf5, 0x29, 0xc8, 0x82, 0xba, 0x0f, 0x32, + 0x96, 0xf3, 0xa3, 0x16, 0xae, 0x68, 0x80, 0xe6, 0x7a, 0x96, 0x8b, 0x74, 0x33, 0xfc, 0x17, 0x2a, + 0x0f, 0x60, 0x04, 0x03, 0x65, 0xbd, 0xe6, 0xfc, 0x9a, 0x8f, 0xbc, 0x2f, 0xc1, 0xcf, 0xf9, 0x83, + 0x66, 0x45, 0xf8, 0x4f, 0xb4, 0x9c, 0x08, 0xe6, 0x41, 0xb6, 0xd3, 0x9a, 0xb3, 0x2e, 0x6d, 0xb7, + 0x15, 0x22, 0x6b, 0xd5, 0x8b, 0xea, 0x12, 0xeb, 0xb5, 0x81, 0xd6, 0xda, 0x10, 0x8f, 0xb8, 0x0b, + 0x14, 0x7a, 0x10, 0x43, 0xe0, 0x02, 0xb6, 0x51, 0x2d, 0x60, 0x43, 0x48, 0x22, 0xe6, 0x82, 0xee, + 0xf9, 0x93, 0xee, 0x59, 0x3b, 0xc8, 0x13, 0x74, 0x5a, 0x83, 0x5b, 0xa8, 0x24, 0x03, 0xb5, 0x82, + 0xda, 0xf4, 0x5f, 0xc9, 0x5a, 0xaa, 0x32, 0xf8, 0x77, 0x54, 0x8a, 0x98, 0xf0, 0x1b, 0x45, 0x55, + 0x51, 0x95, 0xd9, 0x43, 0x26, 0x7c, 0xaa, 0x50, 0x95, 0x0d, 0x63, 0xd1, 0x28, 0xb5, 0x8c, 0xcd, + 0xb2, 0xce, 0x86, 0xb1, 0xa0, 0x0a, 0xb5, 0x3e, 0x19, 0x28, 0xdf, 0x0e, 0xee, 0xa1, 0xaa, 0xf0, + 0xe3, 0x50, 0x88, 0x01, 0xe8, 0x43, 0xde, 0xbc, 0xde, 0xc2, 0xef, 0x69, 0xf6, 0x5e, 0x18, 0xf4, + 0xb8, 0xe7, 0xd4, 0xe5, 0xbf, 0xcc, 0x31, 0x3a, 0xd1, 0xc6, 0x02, 0xd5, 0xdd, 0x01, 0x87, 0x40, + 0x64, 0x75, 0xfa, 0xb8, 0x37, 0xae, 0xd7, 0x6b, 0x6f, 0x46, 0xc1, 0xf9, 0x59, 0x6f, 0xa5, 0x3e, + 0x8b, 0xd2, 0xaf, 0xba, 0x58, 0x6f, 0x0c, 0xb4, 0x7e, 0x05, 0x17, 0xff, 0x86, 0x8a, 0x69, 0x9c, + 0x9f, 0xbf, 0x32, 0x3e, 0x6f, 0x16, 0x8f, 0xe8, 0x3e, 0x95, 0x18, 0x7e, 0x82, 0x2a, 0x49, 0x76, + 0x3f, 0xed, 0xf1, 0xbf, 0x05, 0x3d, 0x5e, 0xbe, 0xba, 0xb3, 0x22, 0x7f, 0x61, 0x8e, 0xe6, 0xa2, + 0x78, 0x13, 0x55, 0x5d, 0xe6, 0xa4, 0x41, 0x77, 0x00, 0xea, 0x78, 0xf5, 0x6c, 0x65, 0x7b, 0xbb, + 0x19, 0x46, 0x27, 0x59, 0xab, 0x8d, 0x7e, 0xb9, 0x72, 0xc7, 0xd2, 0xfd, 0xf3, 0x28, 0x51, 0xee, + 0x8b, 0x99, 0xfb, 0xbb, 0x87, 0x6d, 0x2a, 0x31, 0xdc, 0x44, 0xe5, 0x4e, 0x1a, 0x27, 0x42, 0x79, + 0x2f, 0x3a, 0x35, 0xf9, 0xab, 0x1d, 0x09, 0xd0, 0x0c, 0x77, 0xc8, 0xc9, 0x85, 0x59, 0x38, 0xbd, + 0x30, 0x0b, 0x67, 0x17, 0x66, 0xe1, 0xe5, 0xd8, 0x34, 0x4e, 0xc6, 0xa6, 0x71, 0x3a, 0x36, 0x8d, + 0xb3, 0xb1, 0x69, 0x7c, 0x18, 0x9b, 0xc6, 0xab, 0x8f, 0x66, 0xe1, 0x51, 0x35, 0x9f, 0xea, 0x4b, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6c, 0xff, 0x86, 0xcd, 0x06, 0x00, 0x00, +} + +func (m *AuditSink) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSink) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, 
err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Stages) > 0 { + for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Stages[iNdEx]) + copy(dAtA[i:], m.Stages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stages[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + 
i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Webhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Webhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Throttle != nil { + { + size, err := m.Throttle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x1a + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookThrottleConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Burst != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) + i-- + dAtA[i] = 0x10 + } + if m.QPS != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AuditSink) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AuditSinkList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AuditSinkSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Policy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Stages) > 0 { + for _, s := range m.Stages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *Webhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Throttle != nil { + l = m.Throttle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookThrottleConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QPS != nil { + n += 1 + sovGenerated(uint64(*m.QPS)) + } + if m.Burst != nil { + n += 1 + sovGenerated(uint64(*m.Burst)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditSink) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSink{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AuditSinkList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]AuditSink{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AuditSink", "AuditSink", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&AuditSinkList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *AuditSinkSpec) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&AuditSinkSpec{`, + `Policy:` + strings.Replace(strings.Replace(this.Policy.String(), "Policy", "Policy", 1), `&`, ``, 1) + `,`, + `Webhook:` + strings.Replace(strings.Replace(this.Webhook.String(), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Policy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Policy{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Stages:` + fmt.Sprintf("%v", this.Stages) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Throttle:` + strings.Replace(this.Throttle.String(), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookThrottleConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookThrottleConfig{`, + `QPS:` + valueToStringGenerated(this.QPS) + `,`, + `Burst:` + valueToStringGenerated(this.Burst) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditSink) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSink: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSink: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditSinkList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSinkList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSinkList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, AuditSink{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
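For reference, the generated Marshal/Unmarshal pair vendored above is the only entry point most callers touch; the hand-written types it serializes are defined in types.go later in this patch. Below is a minimal round-trip sketch, not code that ships with this change: it assumes the vendored import path k8s.io/api/auditregistration/v1alpha1, and the webhook URL is a hypothetical placeholder.

package main

import (
	"fmt"

	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
)

func main() {
	qps, burst := int64(10), int64(15)
	url := "https://audit.example.com/sink" // hypothetical endpoint, for illustration only

	in := auditv1alpha1.AuditSink{
		Spec: auditv1alpha1.AuditSinkSpec{
			Policy: auditv1alpha1.Policy{
				Level:  auditv1alpha1.LevelMetadata,
				Stages: []auditv1alpha1.Stage{auditv1alpha1.StageResponseComplete},
			},
			Webhook: auditv1alpha1.Webhook{
				Throttle:     &auditv1alpha1.WebhookThrottleConfig{QPS: &qps, Burst: &burst},
				ClientConfig: auditv1alpha1.WebhookClientConfig{URL: &url},
			},
		},
	}

	// Marshal with the generated gogo/protobuf code, then Unmarshal into a fresh value.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	var out auditv1alpha1.AuditSink
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.Policy.Level, *out.Spec.Webhook.Throttle.QPS) // Metadata 10
}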
+func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSinkSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSinkSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stages = append(m.Stages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Webhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Throttle == nil { + m.Throttle = &WebhookThrottleConfig{} + } + if err := m.Throttle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookThrottleConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookThrottleConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QPS", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.QPS = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Burst", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Burst = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto new file mode 100644 index 00000000..674debee --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto @@ -0,0 +1,162 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.auditregistration.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// AuditSink represents a cluster level audit sink +message AuditSink { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the audit configuration spec + optional AuditSinkSpec spec = 2; +} + +// AuditSinkList is a list of AuditSink items. +message AuditSinkList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of audit configurations. + repeated AuditSink items = 2; +} + +// AuditSinkSpec holds the spec for the audit sink +message AuditSinkSpec { + // Policy defines the policy for selecting which events should be sent to the webhook + // required + optional Policy policy = 1; + + // Webhook to send events + // required + optional Webhook webhook = 2; +} + +// Policy defines the configuration of how audit events are logged +message Policy { + // The Level that all requests are recorded at. + // available options: None, Metadata, Request, RequestResponse + // required + optional string level = 1; + + // Stages is a list of stages for which events are created. + // +optional + repeated string stages = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. 
+ // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; +} + +// Webhook holds the configuration of the webhook +message Webhook { + // Throttle holds the options for throttling the webhook + // +optional + optional WebhookThrottleConfig throttle = 1; + + // ClientConfig holds the connection parameters for the webhook + // required + optional WebhookClientConfig clientConfig = 2; +} + +// WebhookClientConfig contains the information to make a connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 1; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 2; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 3; +} + +// WebhookThrottleConfig holds the configuration for throttling events +message WebhookThrottleConfig { + // ThrottleQPS maximum number of batches per second + // default 10 QPS + // +optional + optional int64 qps = 1; + + // ThrottleBurst is the maximum number of events sent at the same moment + // default 15 QPS + // +optional + optional int64 burst = 2; +} + diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/register.go b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go new file mode 100644 index 00000000..d6271608 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "auditregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AuditSink{}, + &AuditSinkList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go new file mode 100644 index 00000000..a0fb48c3 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go @@ -0,0 +1,198 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. + LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). + LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests and watches). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling during which audit events may be generated. +type Stage string + +// Valid audit stages. 
+const ( + // The stage for events generated after the audit handler receives the request, but before it + // is delegated down the handler chain. + StageRequestReceived = "RequestReceived" + // The stage for events generated after the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated after the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSink represents a cluster level audit sink +type AuditSink struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the audit configuration spec + Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// AuditSinkSpec holds the spec for the audit sink +type AuditSinkSpec struct { + // Policy defines the policy for selecting which events should be sent to the webhook + // required + Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"` + + // Webhook to send events + // required + Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSinkList is a list of AuditSink items. +type AuditSinkList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of audit configurations. + Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Policy defines the configuration of how audit events are logged +type Policy struct { + // The Level that all requests are recorded at. + // available options: None, Metadata, Request, RequestResponse + // required + Level Level `json:"level" protobuf:"bytes,1,opt,name=level"` + + // Stages is a list of stages for which events are created. + // +optional + Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"` +} + +// Webhook holds the configuration of the webhook +type Webhook struct { + // Throttle holds the options for throttling the webhook + // +optional + Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"` + + // ClientConfig holds the connection parameters for the webhook + // required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` +} + +// WebhookThrottleConfig holds the configuration for throttling events +type WebhookThrottleConfig struct { + // ThrottleQPS maximum number of batches per second + // default 10 QPS + // +optional + QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"` + + // ThrottleBurst is the maximum number of events sent at the same moment + // default 15 QPS + // +optional + Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"` +} + +// WebhookClientConfig contains the information to make a connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. 
+ // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..1a86f4da --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,111 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditSink = map[string]string{ + "": "AuditSink represents a cluster level audit sink", + "spec": "Spec defines the audit configuration spec", +} + +func (AuditSink) SwaggerDoc() map[string]string { + return map_AuditSink +} + +var map_AuditSinkList = map[string]string{ + "": "AuditSinkList is a list of AuditSink items.", + "items": "List of audit configurations.", +} + +func (AuditSinkList) SwaggerDoc() map[string]string { + return map_AuditSinkList +} + +var map_AuditSinkSpec = map[string]string{ + "": "AuditSinkSpec holds the spec for the audit sink", + "policy": "Policy defines the policy for selecting which events should be sent to the webhook required", + "webhook": "Webhook to send events required", +} + +func (AuditSinkSpec) SwaggerDoc() map[string]string { + return map_AuditSinkSpec +} + +var map_Policy = map[string]string{ + "": "Policy defines the configuration of how audit events are logged", + "level": "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required", + "stages": "Stages is a list of stages for which events are created.", +} + +func (Policy) SwaggerDoc() map[string]string { + return map_Policy +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_Webhook = map[string]string{ + "": "Webhook holds the configuration of the webhook", + "throttle": "Throttle holds the options for throttling the webhook", + "clientConfig": "ClientConfig holds the connection parameters for the webhook required", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). 
`host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +var map_WebhookThrottleConfig = map[string]string{ + "": "WebhookThrottleConfig holds the configuration for throttling events", + "qps": "ThrottleQPS maximum number of batches per second default 10 QPS", + "burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS", +} + +func (WebhookThrottleConfig) SwaggerDoc() map[string]string { + return map_WebhookThrottleConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..621a19e8 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,229 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSink) DeepCopyInto(out *AuditSink) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink. +func (in *AuditSink) DeepCopy() *AuditSink { + if in == nil { + return nil + } + out := new(AuditSink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AuditSink) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AuditSink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList. +func (in *AuditSinkList) DeepCopy() *AuditSinkList { + if in == nil { + return nil + } + out := new(AuditSinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) { + *out = *in + in.Policy.DeepCopyInto(&out.Policy) + in.Webhook.DeepCopyInto(&out.Webhook) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec. +func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec { + if in == nil { + return nil + } + out := new(AuditSinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + if in.Throttle != nil { + in, out := &in.Throttle, &out.Throttle + *out = new(WebhookThrottleConfig) + (*in).DeepCopyInto(*out) + } + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) { + *out = *in + if in.QPS != nil { + in, out := &in.QPS, &out.QPS + *out = new(int64) + **out = **in + } + if in.Burst != nil { + in, out := &in.Burst, &out.Burst + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig. +func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig { + if in == nil { + return nil + } + out := new(WebhookThrottleConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 2d2ed2ee..1614265b 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -15,6 +15,8 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true + package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 2ce2e2d7..02be20de 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -14,41 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto - - It has these top-level messages: - BoundObjectReference - ExtraValue - TokenRequest - TokenRequestSpec - TokenRequestStatus - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. 
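For orientation, the hunks above vendor the auditregistration.k8s.io/v1alpha1 types (AuditSink, Policy, Webhook, WebhookThrottleConfig, WebhookClientConfig, ServiceReference) together with their generated deepcopy helpers. A minimal sketch of how a consumer might assemble an AuditSink and rely on the generated DeepCopy follows; it is an illustration only, not part of this change, and the sink name, webhook URL, and the string-backed Level("Metadata") conversion are assumptions for the example.

package main

import (
	"fmt"

	auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	qps := int64(10)                          // documented default: 10 batches per second
	burst := int64(15)                        // documented default burst: 15
	url := "https://audit.example.com/events" // hypothetical webhook endpoint

	sink := &auditregv1alpha1.AuditSink{
		ObjectMeta: metav1.ObjectMeta{Name: "example-sink"}, // illustrative name
		Spec: auditregv1alpha1.AuditSinkSpec{
			Policy: auditregv1alpha1.Policy{
				// "Metadata" is one of the documented options: None, Metadata, Request, RequestResponse.
				Level:  auditregv1alpha1.Level("Metadata"),
				Stages: []auditregv1alpha1.Stage{auditregv1alpha1.StageResponseComplete},
			},
			Webhook: auditregv1alpha1.Webhook{
				Throttle:     &auditregv1alpha1.WebhookThrottleConfig{QPS: &qps, Burst: &burst},
				ClientConfig: auditregv1alpha1.WebhookClientConfig{URL: &url},
			},
		},
	}

	// The generated DeepCopy produces a fully independent object: pointer
	// fields such as Throttle.QPS are reallocated, so mutating the clone
	// leaves the original untouched.
	clone := sink.DeepCopy()
	*clone.Spec.Webhook.Throttle.QPS = 100
	fmt.Println(*sink.Spec.Webhook.Throttle.QPS, *clone.Spec.Webhook.Throttle.QPS) // prints: 10 100
}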
var _ = proto.Marshal @@ -61,41 +46,257 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } -func (*BoundObjectReference) ProtoMessage() {} -func (*BoundObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } +func (*BoundObjectReference) ProtoMessage() {} +func (*BoundObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{0} +} +func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BoundObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoundObjectReference.Merge(m, src) +} +func (m *BoundObjectReference) XXX_Size() int { + return m.Size() +} +func (m *BoundObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_BoundObjectReference.DiscardUnknown(m) +} -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo -func (m *TokenRequest) Reset() { *m = TokenRequest{} } -func (*TokenRequest) ProtoMessage() {} -func (*TokenRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{1} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} -func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } -func (*TokenRequestSpec) ProtoMessage() {} -func (*TokenRequestSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo -func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } -func (*TokenRequestStatus) ProtoMessage() {} -func (*TokenRequestStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{2} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} 
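The regenerated boilerplate around this point wires each message (BoundObjectReference, ExtraValue, TokenRequest, TokenReview, UserInfo, ...) into the gogo/protobuf runtime through the XXX_Marshal/XXX_Unmarshal/XXX_Size hooks, which delegate to the per-type Marshal, MarshalToSizedBuffer, Size, and Unmarshal methods later in this file. A minimal round-trip sketch, assuming only the vendored k8s.io/api/authentication/v1 package, shows how that machinery is typically exercised; the token and audience strings are placeholder values.

package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

func main() {
	in := &authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{
			Token:     "opaque-bearer-token",                      // placeholder token
			Audiences: []string{"https://kubernetes.default.svc"}, // placeholder audience
		},
	}

	// Marshal sizes the buffer once, then MarshalToSizedBuffer fills it
	// back-to-front, as in the regenerated code in this file.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	out := &authv1.TokenReview{}
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.Token, out.Spec.Audiences)
}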
+func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } +func (*TokenRequestSpec) ProtoMessage() {} +func (*TokenRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{3} +} +func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestSpec.Merge(m, src) +} +func (m *TokenRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo + +func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } +func (*TokenRequestStatus) ProtoMessage() {} +func (*TokenRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{4} +} +func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestStatus.Merge(m, src) +} +func (m *TokenRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m) +} -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{5} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReview proto.InternalMessageInfo + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } 
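A structural point about the marshaling hunks that follow: the regenerated code no longer appends fields front-to-back with an incrementing index. Marshal sizes the buffer once, MarshalToSizedBuffer fills it back-to-front in descending field-number order, encodeVarintGenerated steps the offset backwards before writing, and sovGenerated computes varint widths with math/bits instead of a shift loop. The standalone sketch below mirrors those two helpers (the names merely echo the generated ones; nothing here is imported from this file) and writes one length-delimited field in that reverse order.

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the regenerated sovGenerated: the number of bytes the varint
// encoding of x occupies, i.e. the bit length of x|1 rounded up to 7-bit groups.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint mirrors encodeVarintGenerated: it steps offset back by the
// encoded size, writes the varint there, and returns the new offset.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	// Write a single length-delimited string field (field number 1, wire type 2)
	// back-to-front, the way MarshalToSizedBuffer does: payload first, then its
	// length varint, then the tag byte.
	payload := "token"
	size := 1 + sov(uint64(len(payload))) + len(payload) // tag + length varint + bytes
	buf := make([]byte, size)
	i := len(buf)

	i -= len(payload)
	copy(buf[i:], payload)
	i = encodeVarint(buf, i, uint64(len(payload)))
	i--
	buf[i] = 0xa // field 1, wire type 2

	fmt.Printf("%x\n", buf) // prints: 0a05746f6b656e
}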
+func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{6} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{7} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{8} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") @@ -107,11 +308,78 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) +} + +var fileDescriptor_2953ea822e7ffe1e = []byte{ + // 903 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x20, 0x2e, 0x5e, 0x09, + 0x55, 0xc0, 0xda, 0x9b, 0x08, 0xc1, 0x6a, 0x91, 0x90, 0x6a, 0x1a, 0x41, 0x84, 0x60, 0x57, 0xb3, + 0xdb, 0x82, 0x38, 0x31, 0xb1, 0x5f, 0x53, 0x13, 0x3c, 
0x36, 0xf6, 0x38, 0x6c, 0x6e, 0xfb, 0x27, + 0x70, 0x04, 0x89, 0x03, 0x7f, 0x04, 0x12, 0xff, 0x42, 0x8f, 0x2b, 0x4e, 0x3d, 0xa0, 0x88, 0x9a, + 0x2b, 0x47, 0x4e, 0x9c, 0xd0, 0x8c, 0xa7, 0x71, 0x9c, 0xb4, 0x69, 0x4e, 0x7b, 0x8b, 0xdf, 0xfb, + 0xde, 0xf7, 0xde, 0xfb, 0xe6, 0xcb, 0x0c, 0xea, 0x8d, 0x1e, 0xc4, 0xa6, 0x17, 0x58, 0xa3, 0x64, + 0x00, 0x11, 0x03, 0x0e, 0xb1, 0x35, 0x06, 0xe6, 0x06, 0x91, 0xa5, 0x12, 0x34, 0xf4, 0x2c, 0x9a, + 0xf0, 0x53, 0x60, 0xdc, 0x73, 0x28, 0xf7, 0x02, 0x66, 0x8d, 0x3b, 0xd6, 0x10, 0x18, 0x44, 0x94, + 0x83, 0x6b, 0x86, 0x51, 0xc0, 0x03, 0xfc, 0x7a, 0x86, 0x36, 0x69, 0xe8, 0x99, 0x45, 0xb4, 0x39, + 0xee, 0xec, 0xde, 0x1b, 0x7a, 0xfc, 0x34, 0x19, 0x98, 0x4e, 0xe0, 0x5b, 0xc3, 0x60, 0x18, 0x58, + 0xb2, 0x68, 0x90, 0x9c, 0xc8, 0x2f, 0xf9, 0x21, 0x7f, 0x65, 0x64, 0xbb, 0xef, 0xe5, 0xad, 0x7d, + 0xea, 0x9c, 0x7a, 0x0c, 0xa2, 0x89, 0x15, 0x8e, 0x86, 0x22, 0x10, 0x5b, 0x3e, 0x70, 0x7a, 0xc5, + 0x08, 0xbb, 0xd6, 0x75, 0x55, 0x51, 0xc2, 0xb8, 0xe7, 0xc3, 0x52, 0xc1, 0xfb, 0x37, 0x15, 0xc4, + 0xce, 0x29, 0xf8, 0x74, 0xb1, 0xce, 0xf8, 0x43, 0x43, 0xaf, 0xda, 0x41, 0xc2, 0xdc, 0x47, 0x83, + 0x6f, 0xc1, 0xe1, 0x04, 0x4e, 0x20, 0x02, 0xe6, 0x00, 0xde, 0x43, 0xd5, 0x91, 0xc7, 0xdc, 0x96, + 0xb6, 0xa7, 0xed, 0x37, 0xec, 0x5b, 0x67, 0x53, 0xbd, 0x94, 0x4e, 0xf5, 0xea, 0x67, 0x1e, 0x73, + 0x89, 0xcc, 0xe0, 0x2e, 0x42, 0xf4, 0x71, 0xff, 0x18, 0xa2, 0xd8, 0x0b, 0x58, 0xab, 0x2c, 0x71, + 0x58, 0xe1, 0xd0, 0xc1, 0x2c, 0x43, 0xe6, 0x50, 0x82, 0x95, 0x51, 0x1f, 0x5a, 0x95, 0x22, 0xeb, + 0x17, 0xd4, 0x07, 0x22, 0x33, 0xd8, 0x46, 0x95, 0xa4, 0x7f, 0xd8, 0xaa, 0x4a, 0xc0, 0x7d, 0x05, + 0xa8, 0x1c, 0xf5, 0x0f, 0xff, 0x9b, 0xea, 0x6f, 0x5e, 0xb7, 0x24, 0x9f, 0x84, 0x10, 0x9b, 0x47, + 0xfd, 0x43, 0x22, 0x8a, 0x8d, 0x0f, 0x10, 0xea, 0x3d, 0xe3, 0x11, 0x3d, 0xa6, 0xdf, 0x25, 0x80, + 0x75, 0x54, 0xf3, 0x38, 0xf8, 0x71, 0x4b, 0xdb, 0xab, 0xec, 0x37, 0xec, 0x46, 0x3a, 0xd5, 0x6b, + 0x7d, 0x11, 0x20, 0x59, 0xfc, 0x61, 0xfd, 0xa7, 0x5f, 0xf5, 0xd2, 0xf3, 0x3f, 0xf7, 0x4a, 0xc6, + 0x2f, 0x65, 0x74, 0xeb, 0x69, 0x30, 0x02, 0x46, 0xe0, 0xfb, 0x04, 0x62, 0x8e, 0xbf, 0x41, 0x75, + 0x71, 0x44, 0x2e, 0xe5, 0x54, 0x2a, 0xd1, 0xec, 0xde, 0x37, 0x73, 0x77, 0xcc, 0x86, 0x30, 0xc3, + 0xd1, 0x50, 0x04, 0x62, 0x53, 0xa0, 0xcd, 0x71, 0xc7, 0xcc, 0xe4, 0xfc, 0x1c, 0x38, 0xcd, 0x35, + 0xc9, 0x63, 0x64, 0xc6, 0x8a, 0x1f, 0xa3, 0x6a, 0x1c, 0x82, 0x23, 0xf5, 0x6b, 0x76, 0x4d, 0x73, + 0x95, 0xf7, 0xcc, 0xf9, 0xd9, 0x9e, 0x84, 0xe0, 0xe4, 0x0a, 0x8a, 0x2f, 0x22, 0x99, 0xf0, 0x57, + 0x68, 0x23, 0xe6, 0x94, 0x27, 0xb1, 0x54, 0xb9, 0x38, 0xf1, 0x4d, 0x9c, 0xb2, 0xce, 0xde, 0x52, + 0xac, 0x1b, 0xd9, 0x37, 0x51, 0x7c, 0xc6, 0xbf, 0x1a, 0xda, 0x5e, 0x1c, 0x01, 0xbf, 0x83, 0x1a, + 0x34, 0x71, 0x3d, 0x61, 0x9a, 0x4b, 0x89, 0x37, 0xd3, 0xa9, 0xde, 0x38, 0xb8, 0x0c, 0x92, 0x3c, + 0x8f, 0x3f, 0x46, 0x3b, 0xf0, 0x2c, 0xf4, 0x22, 0xd9, 0xfd, 0x09, 0x38, 0x01, 0x73, 0x63, 0x79, + 0xd6, 0x15, 0xfb, 0x4e, 0x3a, 0xd5, 0x77, 0x7a, 0x8b, 0x49, 0xb2, 0x8c, 0xc7, 0x0c, 0x6d, 0x0d, + 0x0a, 0x96, 0x55, 0x8b, 0x76, 0x57, 0x2f, 0x7a, 0x95, 0xcd, 0x6d, 0x9c, 0x4e, 0xf5, 0xad, 0x62, + 0x86, 0x2c, 0xb0, 0x1b, 0xbf, 0x69, 0x08, 0x2f, 0xab, 0x84, 0xef, 0xa2, 0x1a, 0x17, 0x51, 0xf5, + 0x17, 0xd9, 0x54, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x13, 0x74, 0x3b, 0x5f, 0xe0, 0xa9, 0xe7, + 0x43, 0xcc, 0xa9, 0x1f, 0xaa, 0xd3, 0x7e, 0x7b, 0x3d, 0x2f, 0x89, 0x32, 0xfb, 0x35, 0x45, 0x7f, + 0xbb, 0xb7, 0x4c, 0x47, 0xae, 0xea, 0x61, 0xfc, 0x5c, 0x46, 0x4d, 0x35, 0xf6, 0xd8, 0x83, 0x1f, + 0x5e, 0x82, 0x97, 0x1f, 0x15, 0xbc, 0x7c, 0x6f, 0x2d, 0xdf, 0x89, 0xd1, 0xae, 
0xb5, 0xf2, 0x97, + 0x0b, 0x56, 0xb6, 0xd6, 0xa7, 0x5c, 0xed, 0x64, 0x07, 0xbd, 0xb2, 0xd0, 0x7f, 0xbd, 0xe3, 0x2c, + 0x98, 0xbd, 0xbc, 0xda, 0xec, 0xc6, 0x3f, 0x1a, 0xda, 0x59, 0x1a, 0x09, 0x7f, 0x88, 0x36, 0xe7, + 0x26, 0x87, 0xec, 0x86, 0xad, 0xdb, 0x77, 0x54, 0xbf, 0xcd, 0x83, 0xf9, 0x24, 0x29, 0x62, 0xf1, + 0xa7, 0xa8, 0x9a, 0xc4, 0x10, 0x29, 0x85, 0xdf, 0x5a, 0x2d, 0xc7, 0x51, 0x0c, 0x51, 0x9f, 0x9d, + 0x04, 0xb9, 0xb4, 0x22, 0x42, 0x24, 0x43, 0x71, 0x93, 0xea, 0x0d, 0x7f, 0xdb, 0xbb, 0xa8, 0x06, + 0x51, 0x14, 0x44, 0xea, 0xde, 0x9e, 0x69, 0xd3, 0x13, 0x41, 0x92, 0xe5, 0x8c, 0xdf, 0xcb, 0xa8, + 0x7e, 0xd9, 0x12, 0xbf, 0x8b, 0xea, 0xa2, 0x8d, 0xbc, 0xec, 0x33, 0x41, 0xb7, 0x55, 0x91, 0xc4, + 0x88, 0x38, 0x99, 0x21, 0xf0, 0x1b, 0xa8, 0x92, 0x78, 0xae, 0x7a, 0x43, 0x9a, 0x73, 0x97, 0x3e, + 0x11, 0x71, 0x6c, 0xa0, 0x8d, 0x61, 0x14, 0x24, 0xa1, 0xb0, 0x81, 0x18, 0x14, 0x89, 0x13, 0xfd, + 0x44, 0x46, 0x88, 0xca, 0xe0, 0x63, 0x54, 0x03, 0x71, 0xe7, 0xcb, 0x5d, 0x9a, 0xdd, 0xce, 0x7a, + 0xd2, 0x98, 0xf2, 0x9d, 0xe8, 0x31, 0x1e, 0x4d, 0xe6, 0xb6, 0x12, 0x31, 0x92, 0xd1, 0xed, 0x0e, + 0xd4, 0x5b, 0x22, 0x31, 0x78, 0x1b, 0x55, 0x46, 0x30, 0xc9, 0x36, 0x22, 0xe2, 0x27, 0xfe, 0x08, + 0xd5, 0xc6, 0xe2, 0x99, 0x51, 0x47, 0xb2, 0xbf, 0xba, 0x6f, 0xfe, 0x2c, 0x91, 0xac, 0xec, 0x61, + 0xf9, 0x81, 0x66, 0xef, 0x9f, 0x5d, 0xb4, 0x4b, 0x2f, 0x2e, 0xda, 0xa5, 0xf3, 0x8b, 0x76, 0xe9, + 0x79, 0xda, 0xd6, 0xce, 0xd2, 0xb6, 0xf6, 0x22, 0x6d, 0x6b, 0xe7, 0x69, 0x5b, 0xfb, 0x2b, 0x6d, + 0x6b, 0x3f, 0xfe, 0xdd, 0x2e, 0x7d, 0x5d, 0x1e, 0x77, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x8c, + 0x44, 0x87, 0xd0, 0xe2, 0x08, 0x00, 0x00, } + func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -119,33 +387,42 @@ func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { } func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoundObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.UID) + copy(dAtA[i:], m.UID) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -153,32 +430,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -186,41 +462,52 @@ func (m *TokenRequest) Marshal() (dAtA []byte, err error) { } func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -228,47 +515,48 @@ func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x20 } if m.BoundObjectRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.BoundObjectRef.Size())) - n4, err := m.BoundObjectRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.BoundObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + 
dAtA[i] = 0x1a } - if m.ExpirationSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -276,29 +564,37 @@ func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpirationTimestamp.Size())) - n5, err := m.ExpirationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -306,41 +602,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n7, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n8, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { 
return nil, err } @@ -348,21 +655,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Token) + copy(dAtA[i:], m.Token) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -370,37 +692,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n9, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -408,95 +747,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, 
string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - return i, nil + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *BoundObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -511,6 +836,9 @@ func (m *BoundObjectReference) Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -523,6 +851,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -535,6 +866,9 @@ func (m *TokenRequest) Size() (n int) { } func (m *TokenRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Audiences) > 0 { @@ -554,6 +888,9 @@ func (m *TokenRequestSpec) Size() (n int) { } func (m *TokenRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -564,6 +901,9 @@ func (m *TokenRequestStatus) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -576,14 
+916,26 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -591,10 +943,19 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -620,14 +981,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -650,7 +1004,7 @@ func (this *TokenRequest) String() string { return "nil" } s := strings.Join([]string{`&TokenRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -663,7 +1017,7 @@ func (this *TokenRequestSpec) String() string { } s := strings.Join([]string{`&TokenRequestSpec{`, `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, - `BoundObjectRef:` + strings.Replace(fmt.Sprintf("%v", this.BoundObjectRef), "BoundObjectReference", "BoundObjectReference", 1) + `,`, + `BoundObjectRef:` + strings.Replace(this.BoundObjectRef.String(), "BoundObjectReference", "BoundObjectReference", 1) + `,`, `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, `}`, }, "") @@ -675,7 +1029,7 @@ func (this *TokenRequestStatus) String() string { } s := strings.Join([]string{`&TokenRequestStatus{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `ExpirationTimestamp:` + strings.Replace(strings.Replace(this.ExpirationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -685,7 +1039,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -698,6 
+1052,7 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -710,6 +1065,7 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -760,7 +1116,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -788,7 +1144,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -798,6 +1154,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -817,7 +1176,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -827,6 +1186,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -846,7 +1208,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -856,6 +1218,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -875,7 +1240,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -885,6 +1250,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -899,6 +1267,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -926,7 +1297,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -954,7 +1325,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -964,6 +1335,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -978,6 +1352,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1005,7 +1382,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1033,7 +1410,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1042,6 +1419,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1063,7 +1443,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1072,6 +1452,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1093,7 +1476,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1102,6 +1485,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1118,6 +1504,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1145,7 +1534,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1173,7 +1562,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1183,6 +1572,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1202,7 +1594,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1211,6 +1603,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1235,7 +1630,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1250,6 +1645,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1277,7 +1675,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1305,7 +1703,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1315,6 +1713,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1334,7 +1735,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1343,6 +1744,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1359,6 +1763,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1386,7 +1793,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1414,7 +1821,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1423,6 +1830,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1444,7 +1854,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1453,6 +1863,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1474,7 +1887,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1483,6 +1896,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1499,6 +1915,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1526,7 +1945,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1554,7 +1973,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1564,11 +1983,46 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1578,6 +2032,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1605,7 +2062,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1633,7 +2090,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1653,7 +2110,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1662,6 +2119,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1683,7 +2143,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1693,11 +2153,46 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = 
postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1707,6 +2202,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1734,7 +2232,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1762,7 +2260,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1772,6 +2270,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1791,7 +2292,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1801,6 +2302,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1820,7 +2324,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,6 +2334,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1849,7 +2356,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1858,54 +2365,20 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1915,46 +2388,88 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1965,6 +2480,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2031,10 +2549,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2063,6 +2584,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2081,67 +2605,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) -} - -var 
fileDescriptorGenerated = []byte{ - // 892 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0xf3, 0x63, 0xb5, 0x99, 0x74, 0x97, 0xdd, 0x29, 0x95, 0xa2, 0x05, 0xec, 0x60, 0x24, - 0x14, 0x01, 0xb5, 0x9b, 0x08, 0x95, 0xaa, 0x48, 0x48, 0x6b, 0x36, 0x82, 0x08, 0x41, 0xab, 0x69, - 0x77, 0x41, 0x9c, 0x98, 0xd8, 0x6f, 0xb3, 0x26, 0x78, 0x6c, 0xec, 0x71, 0x68, 0x6e, 0xfd, 0x13, - 0x38, 0x82, 0xc4, 0x81, 0x3f, 0x02, 0x89, 0x23, 0xd7, 0x3d, 0x56, 0x9c, 0x7a, 0x40, 0x11, 0x6b, - 0xfe, 0x05, 0x4e, 0x9c, 0xd0, 0x8c, 0x67, 0xe3, 0xfc, 0xd8, 0x4d, 0x73, 0xea, 0x2d, 0xf3, 0xde, - 0xf7, 0xbe, 0x79, 0xef, 0x9b, 0x2f, 0xcf, 0xa8, 0x37, 0xba, 0x97, 0x58, 0x7e, 0x68, 0x8f, 0xd2, - 0x01, 0xc4, 0x0c, 0x38, 0x24, 0xf6, 0x18, 0x98, 0x17, 0xc6, 0xb6, 0x4a, 0xd0, 0xc8, 0xb7, 0x69, - 0xca, 0xcf, 0x80, 0x71, 0xdf, 0xa5, 0xdc, 0x0f, 0x99, 0x3d, 0xee, 0xd8, 0x43, 0x60, 0x10, 0x53, - 0x0e, 0x9e, 0x15, 0xc5, 0x21, 0x0f, 0xf1, 0xeb, 0x39, 0xda, 0xa2, 0x91, 0x6f, 0x2d, 0xa2, 0xad, - 0x71, 0xe7, 0xe0, 0xf6, 0xd0, 0xe7, 0x67, 0xe9, 0xc0, 0x72, 0xc3, 0xc0, 0x1e, 0x86, 0xc3, 0xd0, - 0x96, 0x45, 0x83, 0xf4, 0x54, 0x9e, 0xe4, 0x41, 0xfe, 0xca, 0xc9, 0x0e, 0xde, 0x2f, 0xae, 0x0e, - 0xa8, 0x7b, 0xe6, 0x33, 0x88, 0x27, 0x76, 0x34, 0x1a, 0x8a, 0x40, 0x62, 0x07, 0xc0, 0xe9, 0x15, - 0x2d, 0x1c, 0xd8, 0xd7, 0x55, 0xc5, 0x29, 0xe3, 0x7e, 0x00, 0x2b, 0x05, 0x77, 0x5f, 0x54, 0x90, - 0xb8, 0x67, 0x10, 0xd0, 0xe5, 0x3a, 0xf3, 0x4f, 0x0d, 0xbd, 0xea, 0x84, 0x29, 0xf3, 0x1e, 0x0c, - 0xbe, 0x05, 0x97, 0x13, 0x38, 0x85, 0x18, 0x98, 0x0b, 0xb8, 0x85, 0xaa, 0x23, 0x9f, 0x79, 0x4d, - 0xad, 0xa5, 0xb5, 0xeb, 0xce, 0x8d, 0xf3, 0xa9, 0x51, 0xca, 0xa6, 0x46, 0xf5, 0x33, 0x9f, 0x79, - 0x44, 0x66, 0x70, 0x17, 0x21, 0xfa, 0xb0, 0x7f, 0x02, 0x71, 0xe2, 0x87, 0xac, 0x59, 0x96, 0x38, - 0xac, 0x70, 0xe8, 0x70, 0x96, 0x21, 0x73, 0x28, 0xc1, 0xca, 0x68, 0x00, 0xcd, 0xca, 0x22, 0xeb, - 0x17, 0x34, 0x00, 0x22, 0x33, 0xd8, 0x41, 0x95, 0xb4, 0x7f, 0xd4, 0xac, 0x4a, 0xc0, 0x1d, 0x05, - 0xa8, 0x1c, 0xf7, 0x8f, 0xfe, 0x9b, 0x1a, 0x6f, 0x5e, 0x37, 0x24, 0x9f, 0x44, 0x90, 0x58, 0xc7, - 0xfd, 0x23, 0x22, 0x8a, 0xcd, 0x0f, 0x10, 0xea, 0x3d, 0xe1, 0x31, 0x3d, 0xa1, 0xdf, 0xa5, 0x80, - 0x0d, 0x54, 0xf3, 0x39, 0x04, 0x49, 0x53, 0x6b, 0x55, 0xda, 0x75, 0xa7, 0x9e, 0x4d, 0x8d, 0x5a, - 0x5f, 0x04, 0x48, 0x1e, 0xbf, 0xbf, 0xfd, 0xd3, 0xaf, 0x46, 0xe9, 0xe9, 0x5f, 0xad, 0x92, 0xf9, - 0x4b, 0x19, 0xdd, 0x78, 0x1c, 0x8e, 0x80, 0x11, 0xf8, 0x3e, 0x85, 0x84, 0xe3, 0x6f, 0xd0, 0xb6, - 0x78, 0x22, 0x8f, 0x72, 0x2a, 0x95, 0x68, 0x74, 0xef, 0x58, 0x85, 0x3b, 0x66, 0x4d, 0x58, 0xd1, - 0x68, 0x28, 0x02, 0x89, 0x25, 0xd0, 0xd6, 0xb8, 0x63, 0xe5, 0x72, 0x7e, 0x0e, 0x9c, 0x16, 0x9a, - 0x14, 0x31, 0x32, 0x63, 0xc5, 0x0f, 0x51, 0x35, 0x89, 0xc0, 0x95, 0xfa, 0x35, 0xba, 0x96, 0xb5, - 0xce, 0x7b, 0xd6, 0x7c, 0x6f, 0x8f, 0x22, 0x70, 0x0b, 0x05, 0xc5, 0x89, 0x48, 0x26, 0xfc, 0x15, - 0xda, 0x4a, 0x38, 0xe5, 0x69, 0x22, 0x55, 0x5e, 0xec, 0xf8, 0x45, 0x9c, 0xb2, 0xce, 0xd9, 0x55, - 0xac, 0x5b, 0xf9, 0x99, 0x28, 0x3e, 0xf3, 0x5f, 0x0d, 0xed, 0x2d, 0xb7, 0x80, 0xdf, 0x45, 0x75, - 0x9a, 0x7a, 0xbe, 0x30, 0xcd, 0xa5, 0xc4, 0x3b, 0xd9, 0xd4, 0xa8, 0x1f, 0x5e, 0x06, 0x49, 0x91, - 0xc7, 0x0c, 0xed, 0x0e, 0x16, 0xdc, 0xa6, 0x7a, 0xec, 0xae, 0xef, 0xf1, 0x2a, 0x87, 0x3a, 0x38, - 0x9b, 0x1a, 0xbb, 0x8b, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x46, 0xfb, 0xf0, 0x24, 0xf2, 0x63, 0xc9, - 0xf4, 0x08, 0xdc, 0x90, 0x79, 0x89, 0xf4, 0x56, 0xc5, 0xb9, 0x95, 0x4d, 0x8d, 0xfd, 0xde, 0x72, - 0x92, 0xac, 0xe2, 0xcd, 0xdf, 0x34, 
0x84, 0x57, 0x55, 0xc2, 0x6f, 0xa1, 0x1a, 0x17, 0x51, 0xf5, - 0x17, 0xd9, 0x51, 0xa2, 0xd5, 0x72, 0x68, 0x9e, 0xc3, 0x13, 0x74, 0xb3, 0x20, 0x7c, 0xec, 0x07, - 0x90, 0x70, 0x1a, 0x44, 0xea, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xa2, 0xcc, 0x79, 0x4d, 0xd1, 0xdf, - 0xec, 0xad, 0xd2, 0x91, 0xab, 0xee, 0x30, 0x7f, 0x2e, 0xa3, 0x86, 0x6a, 0x7b, 0xec, 0xc3, 0x0f, - 0x2f, 0xc1, 0xcb, 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0x89, 0xd6, 0xae, 0xb5, 0xf2, 0x97, - 0x4b, 0x56, 0xb6, 0x37, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0x4e, - 0xf3, 0x0f, 0x0d, 0xed, 0xaf, 0xdc, 0x82, 0x3f, 0x44, 0x3b, 0x73, 0xcd, 0x40, 0xbe, 0x34, 0xb7, - 0x9d, 0x5b, 0x8a, 0x62, 0xe7, 0x70, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x53, 0x54, 0x4d, 0x13, 0x88, - 0x95, 0x68, 0x6f, 0xaf, 0x9f, 0xf0, 0x38, 0x81, 0xb8, 0xcf, 0x4e, 0xc3, 0x42, 0x2d, 0x11, 0x21, - 0x92, 0x41, 0x4c, 0x00, 0x71, 0x1c, 0xc6, 0x6a, 0xbb, 0xce, 0x26, 0xe8, 0x89, 0x20, 0xc9, 0x73, - 0xe6, 0xef, 0x65, 0xb4, 0x7d, 0xc9, 0x82, 0xdf, 0x43, 0xdb, 0xa2, 0x52, 0xae, 0xe4, 0x7c, 0xec, - 0x3d, 0x55, 0x24, 0x31, 0x22, 0x4e, 0x66, 0x08, 0xfc, 0x06, 0xaa, 0xa4, 0xbe, 0xa7, 0x36, 0x7d, - 0x63, 0x6e, 0x35, 0x13, 0x11, 0xc7, 0x26, 0xda, 0x1a, 0xc6, 0x61, 0x1a, 0x89, 0xc7, 0x12, 0x5b, - 0x00, 0x09, 0xdd, 0x3f, 0x91, 0x11, 0xa2, 0x32, 0xf8, 0x04, 0xd5, 0x40, 0x6c, 0xe6, 0x66, 0xb5, - 0x55, 0x69, 0x37, 0xba, 0x9d, 0xcd, 0xa6, 0xb5, 0xe4, 0x36, 0xef, 0x31, 0x1e, 0x4f, 0xe6, 0xa6, - 0x12, 0x31, 0x92, 0xd3, 0x1d, 0x0c, 0xd4, 0xc6, 0x97, 0x18, 0xbc, 0x87, 0x2a, 0x23, 0x98, 0xe4, - 0x13, 0x11, 0xf1, 0x13, 0x7f, 0x84, 0x6a, 0x63, 0xf1, 0x31, 0x50, 0x2a, 0xb7, 0xd7, 0xdf, 0x5b, - 0x7c, 0x3c, 0x48, 0x5e, 0x76, 0xbf, 0x7c, 0x4f, 0x73, 0xda, 0xe7, 0x17, 0x7a, 0xe9, 0xd9, 0x85, - 0x5e, 0x7a, 0x7e, 0xa1, 0x97, 0x9e, 0x66, 0xba, 0x76, 0x9e, 0xe9, 0xda, 0xb3, 0x4c, 0xd7, 0x9e, - 0x67, 0xba, 0xf6, 0x77, 0xa6, 0x6b, 0x3f, 0xfe, 0xa3, 0x97, 0xbe, 0x2e, 0x8f, 0x3b, 0xff, 0x07, - 0x00, 0x00, 0xff, 0xff, 0x5e, 0x8d, 0x94, 0x78, 0x88, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index 10c79217..db7be173 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -84,7 +84,10 @@ message TokenRequestSpec { optional int64 expirationSeconds = 4; // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound objet exists. + // The token will only be valid for as long as the bound object exists. + // NOTE: The API server's TokenReview endpoint will validate the + // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds + // small if you want prompt revocation. // +optional optional BoundObjectReference boundObjectRef = 3; } @@ -118,6 +121,14 @@ message TokenReviewSpec { // Token is the opaque bearer token. // +optional optional string token = 1; + + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + repeated string audiences = 2; } // TokenReviewStatus is the result of the token authentication request. 
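Editor's note (not part of the patch): the generated.proto hunks immediately above and below document the audience contract that this vendor bump pulls in — spec.audiences narrows which audiences the caller will accept, and status.audiences echoes the identifiers the authenticator actually validated against. The snippet below is a minimal, illustrative Go sketch of how a TokenReview client might enforce that contract using the k8s.io/api/authentication/v1 types as they stand after this change. The audienceCompatible helper and the example audience URL are hypothetical names chosen for illustration; in practice the Status block is filled in by the API server after the TokenReview is submitted.

package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

// audienceCompatible reports whether a completed TokenReview satisfies the
// contract described in the proto comments: when spec.audiences was set, the
// caller should see at least one of those identifiers echoed back in
// status.audiences. An empty status.audiences with authenticated=true only
// means the token is valid for the API server's own audience, so it is
// treated as incompatible when specific audiences were requested.
func audienceCompatible(tr *authv1.TokenReview) bool {
	if !tr.Status.Authenticated {
		return false
	}
	if len(tr.Spec.Audiences) == 0 {
		// Caller did not restrict audiences; any authenticated result is usable.
		return true
	}
	requested := make(map[string]bool, len(tr.Spec.Audiences))
	for _, a := range tr.Spec.Audiences {
		requested[a] = true
	}
	for _, a := range tr.Status.Audiences {
		if requested[a] {
			return true
		}
	}
	// Either the authenticator is not audience-aware (empty status.audiences)
	// or it validated the token against an audience the caller did not request.
	return false
}

func main() {
	// Hypothetical review object with a pre-filled status, for illustration only.
	tr := &authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{
			Token:     "<bearer token>",
			Audiences: []string{"https://svc.example.com"},
		},
		Status: authv1.TokenReviewStatus{
			Authenticated: true,
			Audiences:     []string{"https://svc.example.com"},
		},
	}
	fmt.Println("audience compatible:", audienceCompatible(tr))
}

This mirrors the guidance in the added doc comments: a client that sets spec.audiences should confirm a compatible identifier comes back in status.audiences before trusting that the TokenReview server is audience aware.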
@@ -130,6 +141,18 @@ message TokenReviewStatus { // +optional optional UserInfo user = 2; + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + repeated string audiences = 4; + // Error indicates that the token couldn't be checked // +optional optional string error = 3; diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index 723457a3..c48b0369 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -64,6 +64,13 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -74,6 +81,17 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` @@ -137,7 +155,10 @@ type TokenRequestSpec struct { ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"` // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound objet exists. + // The token will only be valid for as long as the bound object exists. + // NOTE: The API server's TokenReview endpoint will validate the + // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds + // small if you want prompt revocation. 
// +optional BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"` } diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 6632a5dd..09f6b920 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ var map_TokenRequestSpec = map[string]string{ "": "TokenRequestSpec contains client provided parameters of a token request.", "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", - "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound objet exists.", + "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", } func (TokenRequestSpec) SwaggerDoc() map[string]string { @@ -79,8 +79,9 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", + "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -91,6 +92,7 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", + "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. 
If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index f36c253b..aca99c42 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -141,7 +141,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -167,6 +167,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -184,6 +189,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index e0de315d..185a2240 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -15,6 +15,8 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true + package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 8503d212..0721bda8 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -14,35 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -55,25 +44,145 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReview proto.InternalMessageInfo + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{3} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m 
*TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{4} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") @@ -81,11 +190,63 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.UserInfo.ExtraEntry") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_77c9b20d3ad27844) +} + +var fileDescriptor_77c9b20d3ad27844 = []byte{ + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xb6, 0xf3, 0x53, 0x92, 0x0d, 0x81, 0xb2, 0x12, 0x52, 0x14, 0x09, 0xa7, 0x84, 0x4b, 0xa5, + 0xd2, 0x35, 0xad, 0xaa, 0x52, 0x95, 0x53, 0x0d, 0x15, 0x2a, 0x52, 0x85, 0xb4, 0xb4, 0x1c, 0x80, + 0x03, 0x1b, 0x67, 0xea, 0x98, 0xe0, 0x1f, 0xad, 0xd7, 0x81, 0xde, 0xfa, 0x08, 0x1c, 0x39, 0x22, + 0xf1, 0x24, 0xdc, 0x7a, 0xec, 0xb1, 0x07, 0x14, 0x51, 0xf3, 0x04, 0xbc, 0x01, 0xda, 0xf5, 0xb6, + 0x4e, 0x1b, 0x41, 0xdb, 0x9b, 0xf7, 0x9b, 0xf9, 0xbe, 0x99, 0xf9, 0xc6, 0x83, 0x5e, 0x0c, 0xd7, + 0x12, 0xe2, 0x47, 0xf6, 0x30, 0xed, 0x01, 0x0f, 0x41, 0x40, 0x62, 0x8f, 0x20, 0xec, 0x47, 0xdc, + 0xd6, 0x01, 0x16, 0xfb, 0x36, 0x4b, 0xc5, 0x00, 0x42, 0xe1, 0xbb, 0x4c, 0xf8, 0x51, 0x68, 0x8f, + 0x96, 0x7a, 0x20, 0xd8, 0x92, 0xed, 0x41, 0x08, 0x9c, 0x09, 0xe8, 0x93, 0x98, 0x47, 0x22, 0xc2, + 0xf7, 0x73, 0x0a, 0x61, 0xb1, 0x4f, 0xce, 0x53, 0x88, 0xa6, 0xb4, 0x17, 0x3d, 0x5f, 0x0c, 0xd2, + 0x1e, 0x71, 0xa3, 0xc0, 0xf6, 0x22, 0x2f, 0xb2, 0x15, 0xb3, 0x97, 0xee, 0xa9, 0x97, 0x7a, 0xa8, + 0xaf, 0x5c, 0xb1, 0xbd, 0x52, 0x34, 0x11, 0x30, 0x77, 0xe0, 0x87, 0xc0, 0xf7, 0xed, 0x78, 0xe8, + 0x49, 
0x20, 0xb1, 0x03, 0x10, 0xcc, 0x1e, 0x4d, 0xf5, 0xd1, 0xb6, 0xff, 0xc5, 0xe2, 0x69, 0x28, + 0xfc, 0x00, 0xa6, 0x08, 0xab, 0x97, 0x11, 0x12, 0x77, 0x00, 0x01, 0xbb, 0xc8, 0xeb, 0x3e, 0x46, + 0x68, 0xf3, 0xb3, 0xe0, 0xec, 0x35, 0xfb, 0x98, 0x02, 0xee, 0xa0, 0xaa, 0x2f, 0x20, 0x48, 0x5a, + 0xe6, 0x5c, 0x79, 0xbe, 0xee, 0xd4, 0xb3, 0x71, 0xa7, 0xba, 0x25, 0x01, 0x9a, 0xe3, 0xeb, 0xb5, + 0xaf, 0xdf, 0x3a, 0xc6, 0xc1, 0xcf, 0x39, 0xa3, 0xfb, 0xbd, 0x84, 0x1a, 0x3b, 0xd1, 0x10, 0x42, + 0x0a, 0x23, 0x1f, 0x3e, 0xe1, 0xf7, 0xa8, 0x26, 0x87, 0xe9, 0x33, 0xc1, 0x5a, 0xe6, 0x9c, 0x39, + 0xdf, 0x58, 0x7e, 0x44, 0x0a, 0x33, 0xcf, 0x7a, 0x22, 0xf1, 0xd0, 0x93, 0x40, 0x42, 0x64, 0x36, + 0x19, 0x2d, 0x91, 0x97, 0xbd, 0x0f, 0xe0, 0x8a, 0x6d, 0x10, 0xcc, 0xc1, 0x87, 0xe3, 0x8e, 0x91, + 0x8d, 0x3b, 0xa8, 0xc0, 0xe8, 0x99, 0x2a, 0xde, 0x41, 0x95, 0x24, 0x06, 0xb7, 0x55, 0x52, 0xea, + 0xcb, 0xe4, 0xd2, 0x55, 0x91, 0x89, 0xfe, 0x5e, 0xc5, 0xe0, 0x3a, 0x37, 0xb5, 0x7e, 0x45, 0xbe, + 0xa8, 0x52, 0xc3, 0xef, 0xd0, 0x4c, 0x22, 0x98, 0x48, 0x93, 0x56, 0x59, 0xe9, 0xae, 0x5c, 0x53, + 0x57, 0x71, 0x9d, 0x5b, 0x5a, 0x79, 0x26, 0x7f, 0x53, 0xad, 0xd9, 0x75, 0xd1, 0xed, 0x0b, 0x4d, + 0xe0, 0x07, 0xa8, 0x2a, 0x24, 0xa4, 0x5c, 0xaa, 0x3b, 0x4d, 0xcd, 0xac, 0xe6, 0x79, 0x79, 0x0c, + 0x2f, 0xa0, 0x3a, 0x4b, 0xfb, 0x3e, 0x84, 0x2e, 0x24, 0xad, 0x92, 0x5a, 0x46, 0x33, 0x1b, 0x77, + 0xea, 0x1b, 0xa7, 0x20, 0x2d, 0xe2, 0xdd, 0x3f, 0x26, 0xba, 0x33, 0xd5, 0x12, 0x7e, 0x82, 0x9a, + 0x13, 0xed, 0x43, 0x5f, 0xd5, 0xab, 0x39, 0x77, 0x75, 0xbd, 0xe6, 0xc6, 0x64, 0x90, 0x9e, 0xcf, + 0xc5, 0xdb, 0xa8, 0x92, 0x26, 0xc0, 0xb5, 0xd7, 0x0b, 0x57, 0xf0, 0x64, 0x37, 0x01, 0xbe, 0x15, + 0xee, 0x45, 0x85, 0xc9, 0x12, 0xa1, 0x4a, 0xe6, 0xfc, 0x38, 0x95, 0xff, 0x8f, 0x23, 0x0d, 0x02, + 0xce, 0x23, 0xae, 0x16, 0x32, 0x61, 0xd0, 0xa6, 0x04, 0x69, 0x1e, 0xeb, 0xfe, 0x28, 0xa1, 0xda, + 0x69, 0x49, 0xfc, 0x10, 0xd5, 0x64, 0x99, 0x90, 0x05, 0xa0, 0x5d, 0x9d, 0xd5, 0x24, 0x95, 0x23, + 0x71, 0x7a, 0x96, 0x81, 0xef, 0xa1, 0x72, 0xea, 0xf7, 0xd5, 0x68, 0x75, 0xa7, 0xa1, 0x13, 0xcb, + 0xbb, 0x5b, 0xcf, 0xa8, 0xc4, 0x71, 0x17, 0xcd, 0x78, 0x3c, 0x4a, 0x63, 0xf9, 0x43, 0xc8, 0x46, + 0x91, 0x5c, 0xeb, 0x73, 0x85, 0x50, 0x1d, 0xc1, 0x6f, 0x51, 0x15, 0xe4, 0xd5, 0xa8, 0x59, 0x1a, + 0xcb, 0xab, 0xd7, 0xf0, 0x87, 0xa8, 0x73, 0xdb, 0x0c, 0x05, 0xdf, 0x9f, 0x18, 0x4d, 0x62, 0x34, + 0xd7, 0x6c, 0x7b, 0xfa, 0x24, 0x55, 0x0e, 0x9e, 0x45, 0xe5, 0x21, 0xec, 0xe7, 0x63, 0x51, 0xf9, + 0x89, 0x9f, 0xa2, 0xea, 0x48, 0x5e, 0xab, 0x5e, 0xce, 0xe2, 0x15, 0x8a, 0x17, 0x27, 0x4e, 0x73, + 0xee, 0x7a, 0x69, 0xcd, 0x74, 0x16, 0x0f, 0x4f, 0x2c, 0xe3, 0xe8, 0xc4, 0x32, 0x8e, 0x4f, 0x2c, + 0xe3, 0x20, 0xb3, 0xcc, 0xc3, 0xcc, 0x32, 0x8f, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x57, 0x66, + 0x99, 0x5f, 0x7e, 0x5b, 0xc6, 0x9b, 0x1b, 0x5a, 0xe4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, + 0xbb, 0x89, 0x53, 0x68, 0x05, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -93,32 +254,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], 
m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -126,41 +286,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -168,21 +339,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Token) + copy(dAtA[i:], m.Token) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -190,37 +376,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l 
int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n4, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -228,95 +431,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n5, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + 
dAtA[i] = 0x1a } } - return i, nil + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -329,6 +518,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -341,14 +533,26 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -356,10 +560,19 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -385,14 +598,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -402,7 +608,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -415,6 +621,7 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -427,6 +634,7 @@ func (this *TokenReviewStatus) String() string { 
`Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -477,7 +685,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -505,7 +713,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -515,6 +723,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -529,6 +740,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -556,7 +770,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -584,7 +798,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -593,6 +807,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -614,7 +831,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -623,6 +840,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -644,7 +864,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -653,6 +873,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -669,6 +892,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -696,7 +922,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -724,7 +950,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -734,11 +960,46 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -748,6 +1009,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -775,7 +1039,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -803,7 +1067,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -823,7 +1087,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -832,6 +1096,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -853,7 +1120,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -863,11 +1130,46 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -877,6 +1179,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ 
-904,7 +1209,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -932,7 +1237,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -942,6 +1247,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -961,7 +1269,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -971,6 +1279,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -990,7 +1301,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1000,6 +1311,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1019,7 +1333,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1028,54 +1342,20 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1085,46 +1365,88 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1135,6 +1457,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1201,10 +1526,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1233,6 +1561,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1251,51 +1582,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 635 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcf, 0x4f, 0xd4, 0x40, - 0x14, 0x6e, 0xf7, 0x07, 0xee, 0xce, 0x8a, 0xe2, 0x24, 0x26, 0x9b, 0x4d, 0xec, 0xae, 0xeb, 0x85, - 0x44, 0x99, 0x0a, 0x21, 0x48, 0xf0, 0x64, 0x95, 0x18, 0x4c, 0x88, 0xc9, 
0x08, 0x1e, 0xd4, 0x83, - 0xb3, 0xdd, 0x47, 0xb7, 0xae, 0xed, 0x34, 0xd3, 0x69, 0x95, 0x1b, 0x7f, 0x82, 0x47, 0x8f, 0x26, - 0xfe, 0x25, 0x26, 0x1e, 0x38, 0x72, 0xe4, 0x60, 0x88, 0xd4, 0x7f, 0xc4, 0xcc, 0x74, 0x64, 0x17, - 0x88, 0x01, 0x6e, 0xf3, 0xbe, 0xf7, 0xbe, 0x6f, 0xde, 0xf7, 0x66, 0x1e, 0x7a, 0x31, 0x5e, 0x4d, - 0x49, 0xc8, 0xdd, 0x71, 0x36, 0x00, 0x11, 0x83, 0x84, 0xd4, 0xcd, 0x21, 0x1e, 0x72, 0xe1, 0x9a, - 0x04, 0x4b, 0x42, 0x97, 0x65, 0x72, 0x04, 0xb1, 0x0c, 0x7d, 0x26, 0x43, 0x1e, 0xbb, 0xf9, 0xe2, - 0x00, 0x24, 0x5b, 0x74, 0x03, 0x88, 0x41, 0x30, 0x09, 0x43, 0x92, 0x08, 0x2e, 0x39, 0xbe, 0x5b, - 0x52, 0x08, 0x4b, 0x42, 0x72, 0x9a, 0x42, 0x0c, 0xa5, 0xb3, 0x10, 0x84, 0x72, 0x94, 0x0d, 0x88, - 0xcf, 0x23, 0x37, 0xe0, 0x01, 0x77, 0x35, 0x73, 0x90, 0xed, 0xe8, 0x48, 0x07, 0xfa, 0x54, 0x2a, - 0x76, 0x96, 0x27, 0x4d, 0x44, 0xcc, 0x1f, 0x85, 0x31, 0x88, 0x5d, 0x37, 0x19, 0x07, 0x0a, 0x48, - 0xdd, 0x08, 0x24, 0x73, 0xf3, 0x73, 0x7d, 0x74, 0xdc, 0xff, 0xb1, 0x44, 0x16, 0xcb, 0x30, 0x82, - 0x73, 0x84, 0x95, 0x8b, 0x08, 0xa9, 0x3f, 0x82, 0x88, 0x9d, 0xe5, 0xf5, 0x1f, 0x21, 0xb4, 0xfe, - 0x59, 0x0a, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x51, 0x3d, 0x94, 0x10, 0xa5, 0x6d, 0xbb, 0x57, - 0x9d, 0x6f, 0x7a, 0xcd, 0xe2, 0xa8, 0x5b, 0xdf, 0x50, 0x00, 0x2d, 0xf1, 0xb5, 0xc6, 0xd7, 0x6f, - 0x5d, 0x6b, 0xef, 0x57, 0xcf, 0xea, 0x7f, 0xaf, 0xa0, 0xd6, 0x16, 0x1f, 0x43, 0x4c, 0x21, 0x0f, - 0xe1, 0x13, 0x7e, 0x8f, 0x1a, 0xca, 0xcc, 0x90, 0x49, 0xd6, 0xb6, 0x7b, 0xf6, 0x7c, 0x6b, 0xe9, - 0x21, 0x99, 0x0c, 0xf3, 0xa4, 0x27, 0x92, 0x8c, 0x03, 0x05, 0xa4, 0x44, 0x55, 0x93, 0x7c, 0x91, - 0xbc, 0x1c, 0x7c, 0x00, 0x5f, 0x6e, 0x82, 0x64, 0x1e, 0xde, 0x3f, 0xea, 0x5a, 0xc5, 0x51, 0x17, - 0x4d, 0x30, 0x7a, 0xa2, 0x8a, 0xb7, 0x50, 0x2d, 0x4d, 0xc0, 0x6f, 0x57, 0xb4, 0xfa, 0x12, 0xb9, - 0xf0, 0xa9, 0xc8, 0x54, 0x7f, 0xaf, 0x12, 0xf0, 0xbd, 0xeb, 0x46, 0xbf, 0xa6, 0x22, 0xaa, 0xd5, - 0xf0, 0x3b, 0x34, 0x93, 0x4a, 0x26, 0xb3, 0xb4, 0x5d, 0xd5, 0xba, 0xcb, 0x57, 0xd4, 0xd5, 0x5c, - 0xef, 0x86, 0x51, 0x9e, 0x29, 0x63, 0x6a, 0x34, 0xfb, 0x2b, 0xe8, 0xe6, 0x99, 0x26, 0xf0, 0x3d, - 0x54, 0x97, 0x0a, 0xd2, 0x53, 0x6a, 0x7a, 0xb3, 0x86, 0x59, 0x2f, 0xeb, 0xca, 0x5c, 0xff, 0xa7, - 0x8d, 0x6e, 0x9d, 0xbb, 0x05, 0x3f, 0x46, 0xb3, 0x53, 0x1d, 0xc1, 0x50, 0x4b, 0x34, 0xbc, 0xdb, - 0x46, 0x62, 0xf6, 0xc9, 0x74, 0x92, 0x9e, 0xae, 0xc5, 0x9b, 0xa8, 0x96, 0xa5, 0x20, 0xcc, 0xf8, - 0xee, 0x5f, 0xc2, 0xe6, 0x76, 0x0a, 0x62, 0x23, 0xde, 0xe1, 0x93, 0xb9, 0x29, 0x84, 0x6a, 0x19, - 0x65, 0x03, 0x84, 0xe0, 0x42, 0x8f, 0x6d, 0xca, 0xc6, 0xba, 0x02, 0x69, 0x99, 0xeb, 0xff, 0xa8, - 0xa0, 0xc6, 0x3f, 0x15, 0xfc, 0x00, 0x35, 0x14, 0x33, 0x66, 0x11, 0x18, 0xef, 0x73, 0x86, 0xa4, - 0x6b, 0x14, 0x4e, 0x4f, 0x2a, 0xf0, 0x1d, 0x54, 0xcd, 0xc2, 0xa1, 0xee, 0xb6, 0xe9, 0xb5, 0x4c, - 0x61, 0x75, 0x7b, 0xe3, 0x19, 0x55, 0x38, 0xee, 0xa3, 0x99, 0x40, 0xf0, 0x2c, 0x51, 0xcf, 0xa6, - 0xbe, 0x2a, 0x52, 0xc3, 0x7f, 0xae, 0x11, 0x6a, 0x32, 0xf8, 0x2d, 0xaa, 0x83, 0xfa, 0xdb, 0xed, - 0x5a, 0xaf, 0x3a, 0xdf, 0x5a, 0x5a, 0xb9, 0x82, 0x65, 0xa2, 0x97, 0x62, 0x3d, 0x96, 0x62, 0x77, - 0xca, 0x9a, 0xc2, 0x68, 0xa9, 0xd9, 0x09, 0xcc, 0xe2, 0xe8, 0x1a, 0x3c, 0x87, 0xaa, 0x63, 0xd8, - 0x2d, 0x6d, 0x51, 0x75, 0xc4, 0x4f, 0x51, 0x3d, 0x57, 0x3b, 0x65, 0xe6, 0xbd, 0x70, 0x89, 0xcb, - 0x27, 0x8b, 0x48, 0x4b, 0xee, 0x5a, 0x65, 0xd5, 0xf6, 0x16, 0xf6, 0x8f, 0x1d, 0xeb, 0xe0, 0xd8, - 0xb1, 0x0e, 0x8f, 0x1d, 0x6b, 0xaf, 0x70, 0xec, 0xfd, 0xc2, 0xb1, 0x0f, 0x0a, 0xc7, 0x3e, 0x2c, - 0x1c, 0xfb, 0x77, 0xe1, 0xd8, 0x5f, 0xfe, 0x38, 0xd6, 0x9b, 0x6b, 0x46, 0xe4, 0x6f, 0x00, 0x00, - 
0x00, 0xff, 0xff, 0x39, 0x00, 0xe7, 0xfa, 0x0e, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index a057bc59..caf2a6a5 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -57,6 +57,14 @@ message TokenReviewSpec { // Token is the opaque bearer token. // +optional optional string token = 1; + + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + repeated string audiences = 2; } // TokenReviewStatus is the result of the token authentication request. @@ -69,6 +77,18 @@ message TokenReviewStatus { // +optional optional UserInfo user = 2; + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + repeated string audiences = 4; + // Error indicates that the token couldn't be checked // +optional optional string error = 3; diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index a90949dc..0b6cba82 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -48,6 +48,13 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -58,6 +65,17 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. 
If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 968999d1..8c9acfb5 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -38,8 +38,9 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", + "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -50,6 +51,7 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", + "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 3a5f6d5a..a5d82a81 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -49,7 +49,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -75,6 +75,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -92,6 +97,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index c06b798d..cf100e6b 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io + package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index e9145af0..0dc01bc9 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -14,44 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -64,73 +44,397 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_e50da13573e369bd, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo + +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{5} +} +func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + 
return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_e50da13573e369bd, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) } -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_e50da13573e369bd, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) } -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_e50da13573e369bd, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() } +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_e50da13573e369bd, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) +} +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) } +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo + func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_e50da13573e369bd, []int{13} +} 
+func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) } +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") @@ -145,13 +449,95 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptor_e50da13573e369bd) +} + +var fileDescriptor_e50da13573e369bd = []byte{ + // 1140 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xda, 0xd1, + 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, + 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, + 0xc2, 0xa9, 0x12, 0xff, 0x00, 0x47, 0x0e, 0x1c, 0xf8, 0x0f, 0xb8, 0x20, 0x71, 0xe3, 0xc0, 0x01, + 0xe5, 0xd8, 0x63, 0x91, 0x90, 0x45, 0x96, 0x33, 0xff, 0x03, 0x9a, 0xd9, 0xb1, 0x77, 0x9d, 0xac, + 0xdd, 0x84, 0x03, 0xbd, 0xf4, 0xb6, 0xfb, 0x3e, 0x9f, 0xf7, 0xe6, 0xcd, 0xfb, 0x35, 0x0f, 0x6c, + 0x1f, 0x6d, 0x32, 0xcb, 0xf5, 0xed, 0xa3, 0xb0, 0x49, 0xa8, 0x47, 0x38, 0x61, 0x76, 0x9f, 0x78, + 0x2d, 0x9f, 0xda, 0x0a, 0xc0, 0x81, 0x6b, 0xe3, 0x90, 0x77, 0x7c, 0xea, 0x7e, 0x8d, 0xb9, 0xeb, + 0x7b, 0x76, 0x7f, 0xdd, 0x6e, 0x13, 0x8f, 0x50, 0xcc, 0x49, 0xcb, 0x0a, 0xa8, 0xcf, 0x7d, 0x78, + 0x2b, 0x26, 0x5b, 0x38, 0x70, 0xad, 0x31, 0xb2, 0xd5, 0x5f, 0x5f, 0x79, 0xaf, 0xed, 0xf2, 0x4e, + 0xd8, 0xb4, 0x1c, 0xbf, 0x67, 0xb7, 0xfd, 0xb6, 0x6f, 0x4b, 0x9d, 0x66, 0x78, 0x28, 0xff, 0xe4, + 0x8f, 0xfc, 0x8a, 0x6d, 0xad, 0xdc, 0x4d, 0x0e, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x9e, 0xd8, + 0xc1, 0x51, 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x78, 0xb0, 0x62, 0x4f, 0xd2, 0xa2, 0xa1, + 0xc7, 0xdd, 0x1e, 0xb9, 0xa0, 0x70, 0xff, 0x55, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xaf, 0x67, + 0x6e, 0x00, 0xb0, 0xf3, 0x15, 0xa7, 0xf8, 0x00, 0x77, 0x43, 0x02, 0x6b, 0xa0, 0xe8, 0x72, 0xd2, + 0x63, 0x86, 0xb6, 0x9a, 0x5f, 0x2b, 0x37, 0xca, 0xd1, 0xa0, 0x56, 0xdc, 0x15, 0x02, 0x14, 0xcb, + 0x1f, 0x94, 0xbe, 0xfb, 0xa1, 0x96, 0x7b, 0xfe, 0xc7, 0x6a, 0xce, 0xfc, 0x49, 0x07, 0xc6, 0x23, + 0xdf, 0xc1, 0xdd, 0xbd, 0xb0, 0xf9, 0x05, 
0x71, 0xf8, 0x96, 0xe3, 0x10, 0xc6, 0x10, 0xe9, 0xbb, + 0xe4, 0x18, 0x7e, 0x0e, 0x4a, 0xe2, 0x66, 0x2d, 0xcc, 0xb1, 0xa1, 0xad, 0x6a, 0x6b, 0x95, 0xfa, + 0xfb, 0x56, 0x12, 0xd3, 0x91, 0x83, 0x56, 0x70, 0xd4, 0x16, 0x02, 0x66, 0x09, 0xb6, 0xd5, 0x5f, + 0xb7, 0x3e, 0x92, 0xb6, 0x1e, 0x13, 0x8e, 0x1b, 0xf0, 0x74, 0x50, 0xcb, 0x45, 0x83, 0x1a, 0x48, + 0x64, 0x68, 0x64, 0x15, 0x1e, 0x80, 0x02, 0x0b, 0x88, 0x63, 0xe8, 0xd2, 0xfa, 0x5d, 0x6b, 0x4a, + 0xc6, 0xac, 0x0c, 0x0f, 0xf7, 0x02, 0xe2, 0x34, 0xae, 0xa9, 0x13, 0x0a, 0xe2, 0x0f, 0x49, 0x7b, + 0xf0, 0x33, 0x30, 0xc3, 0x38, 0xe6, 0x21, 0x33, 0xf2, 0xd2, 0xf2, 0xfd, 0x2b, 0x5b, 0x96, 0xda, + 0x8d, 0xff, 0x29, 0xdb, 0x33, 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc0, 0xd2, 0x13, 0xdf, 0x43, + 0x84, 0xf9, 0x21, 0x75, 0xc8, 0x16, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x0a, 0x0a, 0x01, + 0xe6, 0x1d, 0x19, 0xae, 0x72, 0xe2, 0xda, 0x53, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, + 0x29, 0xaf, 0x9c, 0x62, 0x1c, 0x10, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x04, 0xf3, 0x29, 0xe3, 0x28, + 0xec, 0xca, 0x8c, 0x0a, 0x68, 0x2c, 0xa3, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x82, 0x79, 0x2f, + 0xd1, 0xd9, 0x47, 0x8f, 0x98, 0xa1, 0x4b, 0xea, 0x62, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, + 0xe7, 0x9a, 0xbf, 0xe8, 0x00, 0x66, 0xdc, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, + 0xd4, 0x95, 0xae, 0x2b, 0x87, 0xcb, 0x4f, 0x86, 0x00, 0x4a, 0x38, 0xaf, 0xbe, 0x1c, 0x7c, 0x0b, + 0x14, 0xdb, 0xd4, 0x0f, 0x03, 0x99, 0x98, 0x72, 0x63, 0x4e, 0x51, 0x8a, 0x1f, 0x0a, 0x21, 0x8a, + 0x31, 0x78, 0x1b, 0xcc, 0xf6, 0x09, 0x65, 0xae, 0xef, 0x19, 0x05, 0x49, 0x9b, 0x57, 0xb4, 0xd9, + 0x83, 0x58, 0x8c, 0x86, 0x38, 0xbc, 0x03, 0x4a, 0x54, 0x39, 0x6e, 0x14, 0x25, 0x77, 0x41, 0x71, + 0x4b, 0xa3, 0x08, 0x8e, 0x18, 0xf0, 0x1e, 0xa8, 0xb0, 0xb0, 0x39, 0x52, 0x98, 0x91, 0x0a, 0x8b, + 0x4a, 0xa1, 0xb2, 0x97, 0x40, 0x28, 0xcd, 0x13, 0xd7, 0x12, 0x77, 0x34, 0x66, 0xc7, 0xaf, 0x25, + 0x42, 0x80, 0x24, 0x62, 0xfe, 0xaa, 0x81, 0x6b, 0x57, 0xcb, 0xd8, 0xbb, 0xa0, 0x8c, 0x03, 0x57, + 0x5e, 0x7b, 0x98, 0xab, 0x39, 0x11, 0xd7, 0xad, 0xa7, 0xbb, 0xb1, 0x10, 0x25, 0xb8, 0x20, 0x0f, + 0x9d, 0x11, 0x25, 0x3d, 0x22, 0x0f, 0x8f, 0x64, 0x28, 0xc1, 0xe1, 0x06, 0x98, 0x1b, 0xfe, 0xc8, + 0x24, 0x19, 0x05, 0xa9, 0x70, 0x3d, 0x1a, 0xd4, 0xe6, 0x50, 0x1a, 0x40, 0xe3, 0x3c, 0xf3, 0x67, + 0x1d, 0x2c, 0xef, 0x91, 0xee, 0xe1, 0xeb, 0x99, 0x05, 0xcf, 0xc6, 0x66, 0xc1, 0xe6, 0xf4, 0x8e, + 0xcd, 0xf6, 0xf2, 0xb5, 0xcd, 0x83, 0xef, 0x75, 0x70, 0x6b, 0x8a, 0x4f, 0xf0, 0x18, 0x40, 0x7a, + 0xa1, 0xbd, 0x54, 0x1c, 0xed, 0xa9, 0xbe, 0x5c, 0xec, 0xca, 0xc6, 0x8d, 0x68, 0x50, 0xcb, 0xe8, + 0x56, 0x94, 0x71, 0x04, 0xfc, 0x46, 0x03, 0x4b, 0x5e, 0xd6, 0xa4, 0x52, 0x61, 0xae, 0x4f, 0x3d, + 0x3c, 0x73, 0xc6, 0x35, 0x6e, 0x46, 0x83, 0x5a, 0xf6, 0xf8, 0x43, 0xd9, 0x67, 0x89, 0x57, 0xe6, + 0x46, 0x2a, 0x3c, 0xa2, 0x41, 0xfe, 0xbb, 0xba, 0xfa, 0x78, 0xac, 0xae, 0x36, 0x2e, 0x5b, 0x57, + 0x29, 0x27, 0x27, 0x96, 0xd5, 0xa7, 0xe7, 0xca, 0xea, 0xde, 0x65, 0xca, 0x2a, 0x6d, 0x78, 0x7a, + 0x55, 0x3d, 0x06, 0x2b, 0x93, 0x1d, 0xba, 0xf2, 0x70, 0x36, 0x7f, 0xd4, 0xc1, 0xe2, 0x9b, 0x67, + 0xfe, 0x2a, 0x6d, 0xfd, 0x5b, 0x01, 0x2c, 0xbf, 0x69, 0xe9, 0x49, 0x8b, 0x4e, 0xc8, 0x08, 0x55, + 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x50, 0x24, 0x11, 0x68, 0x82, 0x99, 0x76, 0xfc, 0xba, 0xc5, + 0xef, 0x0f, 0x10, 0x01, 0x56, 0x4f, 0x9b, 0x42, 0x60, 0x0b, 0x14, 0x89, 0xd8, 0x5b, 0x8d, 0xe2, + 0x6a, 0x7e, 0xad, 0x52, 0xff, 0xe0, 0xdf, 0x54, 0x86, 0x25, 0x37, 0xdf, 0x1d, 0x8f, 0xd3, 0x93, + 0x64, 0x9d, 0x90, 0x32, 0x14, 0x1b, 0x87, 0xff, 0x07, 0xf9, 0xd0, 
0x6d, 0xa9, 0xd7, 0xbe, 0xa2, + 0x28, 0xf9, 0xfd, 0xdd, 0x6d, 0x24, 0xe4, 0x2b, 0x58, 0x2d, 0xcf, 0xd2, 0x04, 0x5c, 0x00, 0xf9, + 0x23, 0x72, 0x12, 0x37, 0x14, 0x12, 0x9f, 0xf0, 0x21, 0x28, 0xf6, 0xc5, 0x5e, 0xad, 0xe2, 0xfb, + 0xce, 0x54, 0x27, 0x93, 0x35, 0x1c, 0xc5, 0x5a, 0x0f, 0xf4, 0x4d, 0xcd, 0xfc, 0x5d, 0x03, 0x37, + 0x27, 0x96, 0x9f, 0x58, 0x77, 0x70, 0xb7, 0xeb, 0x1f, 0x93, 0x96, 0x3c, 0xb6, 0x94, 0xac, 0x3b, + 0x5b, 0xb1, 0x18, 0x0d, 0x71, 0xf8, 0x36, 0x98, 0x69, 0x11, 0xcf, 0x25, 0x2d, 0xb9, 0x18, 0x95, + 0x92, 0xca, 0xdd, 0x96, 0x52, 0xa4, 0x50, 0xc1, 0xa3, 0x04, 0x33, 0xdf, 0x53, 0xab, 0xd8, 0x88, + 0x87, 0xa4, 0x14, 0x29, 0x14, 0x6e, 0x81, 0x79, 0x22, 0xdc, 0x94, 0xfe, 0xef, 0x50, 0xea, 0x0f, + 0x33, 0xba, 0xac, 0x14, 0xe6, 0x77, 0xc6, 0x61, 0x74, 0x9e, 0x6f, 0xfe, 0xad, 0x03, 0x63, 0xd2, + 0x68, 0x83, 0x87, 0xc9, 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2f, 0xd5, 0x20, 0x42, + 0xa3, 0xb1, 0xa4, 0x1c, 0x99, 0x4b, 0x4b, 0x53, 0xab, 0x8b, 0xfc, 0x85, 0x14, 0x2c, 0x78, 0xe3, + 0x3b, 0x73, 0xbc, 0x54, 0x55, 0xea, 0x77, 0x2e, 0xdb, 0x0e, 0xf2, 0x34, 0x43, 0x9d, 0xb6, 0x70, + 0x0e, 0x60, 0xe8, 0x82, 0x7d, 0x58, 0x07, 0xc0, 0xf5, 0x1c, 0xbf, 0x17, 0x74, 0x09, 0x27, 0x32, + 0x6c, 0xa5, 0x64, 0x0e, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xc5, 0xbb, 0x70, 0xb5, 0x78, 0x37, + 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, + 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, + 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x99, 0x87, 0xb8, 0x24, + 0x47, 0x0f, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -159,32 +545,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -192,41 +577,52 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -234,25 +630,32 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -260,47 +663,40 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } 
@@ -308,45 +704,57 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -354,77 +762,58 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], 
m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -432,41 +821,52 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -474,37 +874,46 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - 
return 0, err - } - i += n7 - } if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -512,41 +921,52 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -554,21 +974,27 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -576,41 +1002,52 @@ func (m 
*SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -618,91 +1055,94 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) } - i += n15 - } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } - i += n16 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) if len(m.Groups) > 0 { - for _, s := range m.Groups { 
+ for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n17 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + return len(dAtA) - i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -710,41 +1150,48 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { + i-- + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) + i-- dAtA[i] = 0x20 - i++ - if m.Denied { + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -752,77 +1199,74 @@ func (m 
*SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -835,6 +1279,9 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -847,6 +1294,9 @@ func (m *LocalSubjectAccessReview) Size() (n int) { } func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -857,6 +1307,9 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -875,6 +1328,9 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
len(m.Namespace) @@ -895,6 +1351,9 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -925,6 +1384,9 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -937,6 +1399,9 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -951,6 +1416,9 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -963,6 +1431,9 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -971,6 +1442,9 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -983,6 +1457,9 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -1016,6 +1493,9 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1028,6 +1508,9 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1049,14 +1532,7 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1066,7 +1542,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1129,7 +1605,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", 
"SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1141,8 +1617,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1152,7 +1628,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1174,7 +1650,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1196,8 +1672,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Extra:` + mapStringForExtra + `,`, @@ -1223,9 +1699,19 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules 
:= "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1255,7 +1741,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1283,7 +1769,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1293,6 +1779,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1307,6 +1796,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1334,7 +1826,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1362,7 +1854,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1371,6 +1863,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1392,7 +1887,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1401,6 +1896,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1422,7 +1920,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1431,6 +1929,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1447,6 +1948,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + 
return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1474,7 +1978,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1502,7 +2006,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1512,6 +2016,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1531,7 +2038,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1541,6 +2048,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1555,6 +2065,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1582,7 +2095,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1610,7 +2123,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1620,6 +2133,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1639,7 +2155,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1649,6 +2165,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1663,6 +2182,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1690,7 +2212,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1718,7 +2240,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1728,6 +2250,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -1747,7 +2272,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1757,6 +2282,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1776,7 +2304,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1786,6 +2314,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1805,7 +2336,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1815,6 +2346,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1834,7 +2368,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1844,6 +2378,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1863,7 +2400,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1873,6 +2410,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1892,7 +2432,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1902,6 +2442,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1916,6 +2459,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1943,7 +2489,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1971,7 +2517,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1981,6 +2527,9 @@ func (m 
*ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2000,7 +2549,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2010,6 +2559,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2029,7 +2581,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2039,6 +2591,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2058,7 +2613,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2068,6 +2623,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2082,6 +2640,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2109,7 +2670,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2137,7 +2698,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2146,6 +2707,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2167,7 +2731,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2176,6 +2740,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2197,7 +2764,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2206,6 +2773,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2222,6 +2792,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2249,7 +2822,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2277,7 +2850,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2286,6 +2859,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2310,7 +2886,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2319,6 +2895,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2338,6 +2917,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2365,7 +2947,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2393,7 +2975,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2402,6 +2984,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2423,7 +3008,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2432,6 +3017,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2453,7 +3041,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2462,6 +3050,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2478,6 +3069,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2505,7 +3099,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire 
|= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2533,7 +3127,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2543,6 +3137,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2557,6 +3154,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2584,7 +3184,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2612,7 +3212,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2621,6 +3221,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2642,7 +3245,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2651,6 +3254,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2672,7 +3278,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2681,6 +3287,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2697,6 +3306,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2724,7 +3336,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2752,7 +3364,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2761,6 +3373,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2785,7 +3400,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2794,6 
+3409,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2818,7 +3436,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2828,6 +3446,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2847,7 +3468,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2857,6 +3478,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2876,7 +3500,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2885,54 +3509,20 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2942,46 +3532,88 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := 
int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -2997,7 +3629,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3007,6 +3639,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3021,6 +3656,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3048,7 +3686,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3076,7 +3714,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3096,7 +3734,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3106,6 +3744,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3125,7 +3766,7 @@ func (m 
*SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3135,6 +3776,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3154,7 +3798,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3169,6 +3813,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3196,7 +3843,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3224,7 +3871,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3233,6 +3880,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3255,7 +3905,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3264,6 +3914,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3286,7 +3939,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3306,7 +3959,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3316,6 +3969,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3330,6 +3986,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3396,10 +4055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3428,6 +4090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3446,83 +4111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative 
length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1140 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xff, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xdb, 0xd1, - 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, - 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, - 0xc2, 0xa9, 0x12, 0x5f, 0x80, 0x23, 0x07, 0x0e, 0x7c, 0x03, 0x2e, 0x48, 0xdc, 0x38, 0x70, 0x40, - 0x39, 0xf6, 0x58, 0x24, 0x64, 0x91, 0xe5, 0xcc, 0x77, 0x40, 0x33, 0x3b, 0xf6, 0xae, 0x93, 0xb5, - 0x9b, 0x70, 0xa0, 0x97, 0xde, 0x76, 0x9f, 0xdf, 0xef, 0x79, 0x99, 0xe7, 0x65, 0xe6, 0x01, 0xdb, - 0x47, 0x9b, 0xcc, 0x72, 0x7d, 0xfb, 0x28, 0x6c, 0x12, 0xea, 0x11, 0x4e, 0x98, 0xdd, 0x27, 0x5e, - 0xcb, 0xa7, 0xb6, 0x02, 0x70, 0xe0, 0xda, 0x38, 0xe4, 0x1d, 0x9f, 0xba, 0x5f, 0x63, 0xee, 0xfa, - 0x9e, 0xdd, 0x5f, 0xb7, 0xdb, 0xc4, 0x23, 0x14, 0x73, 0xd2, 0xb2, 0x02, 0xea, 0x73, 0x1f, 0xde, - 0x8a, 0xc9, 0x16, 0x0e, 0x5c, 0x6b, 0x8c, 0x6c, 0xf5, 0xd7, 0x57, 0xde, 0x6b, 0xbb, 0xbc, 0x13, - 0x36, 0x2d, 0xc7, 0xef, 0xd9, 0x6d, 0xbf, 0xed, 0xdb, 0x52, 0xa7, 0x19, 0x1e, 0xca, 0x3f, 0xf9, - 0x23, 0xbf, 0x62, 0x5b, 0x2b, 0x77, 0x13, 0xc7, 0x3d, 0xec, 0x74, 0x5c, 0x8f, 0xd0, 0x13, 0x3b, - 0x38, 0x6a, 0x0b, 0x01, 0xb3, 0x7b, 0x84, 0xe3, 0x8c, 0x08, 0x56, 0xec, 0x49, 0x5a, 0x34, 0xf4, - 0xb8, 0xdb, 0x23, 0x17, 0x14, 0xee, 0xbf, 0x4a, 0x81, 0x39, 0x1d, 0xd2, 0xc3, 0xe7, 0xf5, 0xcc, - 0x0d, 0x00, 0x76, 0xbe, 0xe2, 0x14, 0x1f, 0xe0, 0x6e, 0x48, 0x60, 0x0d, 0x14, 0x5d, 0x4e, 0x7a, - 0xcc, 0xd0, 0x56, 0xf3, 0x6b, 0xe5, 0x46, 0x39, 0x1a, 0xd4, 0x8a, 0xbb, 0x42, 0x80, 0x62, 0xf9, - 0x83, 0xd2, 0x77, 0x3f, 0xd4, 0x72, 0xcf, 0xff, 0x58, 0xcd, 0x99, 0x3f, 0xe9, 0xc0, 0x78, 0xe4, - 0x3b, 0xb8, 0xbb, 0x17, 0x36, 0xbf, 0x20, 0x0e, 0xdf, 0x72, 0x1c, 0xc2, 0x18, 0x22, 0x7d, 0x97, - 0x1c, 0xc3, 0xcf, 0x41, 0x49, 0x9c, 0xac, 0x85, 0x39, 0x36, 0xb4, 0x55, 0x6d, 0xad, 0x52, 0x7f, - 0xdf, 0x4a, 0x72, 0x3a, 0x0a, 0xd0, 0x0a, 0x8e, 0xda, 0x42, 0xc0, 0x2c, 0xc1, 0xb6, 0xfa, 0xeb, - 0xd6, 0x47, 0xd2, 0xd6, 0x63, 0xc2, 0x71, 0x03, 0x9e, 0x0e, 0x6a, 0xb9, 0x68, 0x50, 0x03, 0x89, - 0x0c, 0x8d, 0xac, 0xc2, 0x03, 0x50, 0x60, 0x01, 0x71, 0x0c, 0x5d, 0x5a, 0xbf, 0x6b, 0x4d, 0xa9, - 0x98, 0x95, 0x11, 0xe1, 0x5e, 0x40, 0x9c, 0xc6, 0x35, 0xe5, 0xa1, 0x20, 0xfe, 0x90, 0xb4, 0x07, - 0x3f, 0x03, 0x33, 0x8c, 0x63, 0x1e, 0x32, 0x23, 0x2f, 0x2d, 0xdf, 0xbf, 0xb2, 0x65, 0xa9, 0xdd, - 0xf8, 0x9f, 0xb2, 0x3d, 0x13, 0xff, 0x23, 0x65, 0xd5, 0xfc, 0x04, 0x2c, 0x3d, 0xf1, 0x3d, 0x44, - 0x98, 0x1f, 0x52, 0x87, 0x6c, 0x71, 0x4e, 0xdd, 0x66, 0xc8, 0x09, 0x83, 0xab, 0xa0, 0x10, 0x60, - 0xde, 0x91, 0xe9, 0x2a, 0x27, 0xa1, 0x3d, 0xc5, 0xbc, 0x83, 0x24, 0x22, 0x18, 0x7d, 0x42, 0x9b, - 0xf2, 0xc8, 0x29, 0xc6, 0x01, 0xa1, 0x4d, 0x24, 0x11, 0xf3, 0x4b, 0x30, 0x9f, 0x32, 0x8e, 0xc2, - 0xae, 0xac, 0xa8, 0x80, 0xc6, 0x2a, 0x2a, 0x34, 0x18, 0x8a, 0xe5, 0xf0, 0x21, 0x98, 0xf7, 0x12, - 0x9d, 0x7d, 0xf4, 0x88, 0x19, 0xba, 0xa4, 0x2e, 0x46, 0x83, 0x5a, 0xda, 0x9c, 0x80, 0xd0, 0x79, - 0xae, 0xf9, 0x8b, 0x0e, 0x60, 0xc6, 0x69, 0x6c, 0x50, 0xf6, 0x70, 0x8f, 0xb0, 0x00, 0x3b, 0x44, - 0x1d, 0xe9, 0xba, 0x0a, 0xb8, 0xfc, 0x64, 0x08, 0xa0, 0x84, 0xf3, 0xea, 0xc3, 0xc1, 
0xb7, 0x40, - 0xb1, 0x4d, 0xfd, 0x30, 0x90, 0x85, 0x29, 0x37, 0xe6, 0x14, 0xa5, 0xf8, 0xa1, 0x10, 0xa2, 0x18, - 0x83, 0xb7, 0xc1, 0x6c, 0x9f, 0x50, 0xe6, 0xfa, 0x9e, 0x51, 0x90, 0xb4, 0x79, 0x45, 0x9b, 0x3d, - 0x88, 0xc5, 0x68, 0x88, 0xc3, 0x3b, 0xa0, 0x44, 0x55, 0xe0, 0x46, 0x51, 0x72, 0x17, 0x14, 0xb7, - 0x34, 0xca, 0xe0, 0x88, 0x01, 0xef, 0x81, 0x0a, 0x0b, 0x9b, 0x23, 0x85, 0x19, 0xa9, 0xb0, 0xa8, - 0x14, 0x2a, 0x7b, 0x09, 0x84, 0xd2, 0x3c, 0x71, 0x2c, 0x71, 0x46, 0x63, 0x76, 0xfc, 0x58, 0x22, - 0x05, 0x48, 0x22, 0xe6, 0xaf, 0x1a, 0xb8, 0x76, 0xb5, 0x8a, 0xbd, 0x0b, 0xca, 0x38, 0x70, 0xe5, - 0xb1, 0x87, 0xb5, 0x9a, 0x13, 0x79, 0xdd, 0x7a, 0xba, 0x1b, 0x0b, 0x51, 0x82, 0x0b, 0xf2, 0x30, - 0x18, 0xd1, 0xd2, 0x23, 0xf2, 0xd0, 0x25, 0x43, 0x09, 0x0e, 0x37, 0xc0, 0xdc, 0xf0, 0x47, 0x16, - 0xc9, 0x28, 0x48, 0x85, 0xeb, 0xd1, 0xa0, 0x36, 0x87, 0xd2, 0x00, 0x1a, 0xe7, 0x99, 0x3f, 0xeb, - 0x60, 0x79, 0x8f, 0x74, 0x0f, 0x5f, 0xcf, 0x5d, 0xf0, 0x6c, 0xec, 0x2e, 0xd8, 0x9c, 0x3e, 0xb1, - 0xd9, 0x51, 0xbe, 0xb6, 0xfb, 0xe0, 0x7b, 0x1d, 0xdc, 0x9a, 0x12, 0x13, 0x3c, 0x06, 0x90, 0x5e, - 0x18, 0x2f, 0x95, 0x47, 0x7b, 0x6a, 0x2c, 0x17, 0xa7, 0xb2, 0x71, 0x23, 0x1a, 0xd4, 0x32, 0xa6, - 0x15, 0x65, 0xb8, 0x80, 0xdf, 0x68, 0x60, 0xc9, 0xcb, 0xba, 0xa9, 0x54, 0x9a, 0xeb, 0x53, 0x9d, - 0x67, 0xde, 0x71, 0x8d, 0x9b, 0xd1, 0xa0, 0x96, 0x7d, 0xfd, 0xa1, 0x6c, 0x5f, 0xe2, 0x95, 0xb9, - 0x91, 0x4a, 0x8f, 0x18, 0x90, 0xff, 0xae, 0xaf, 0x3e, 0x1e, 0xeb, 0xab, 0x8d, 0xcb, 0xf6, 0x55, - 0x2a, 0xc8, 0x89, 0x6d, 0xf5, 0xe9, 0xb9, 0xb6, 0xba, 0x77, 0x99, 0xb6, 0x4a, 0x1b, 0x9e, 0xde, - 0x55, 0x8f, 0xc1, 0xca, 0xe4, 0x80, 0xae, 0x7c, 0x39, 0x9b, 0x3f, 0xea, 0x60, 0xf1, 0xcd, 0x33, - 0x7f, 0x95, 0xb1, 0xfe, 0xad, 0x00, 0x96, 0xdf, 0x8c, 0xf4, 0xa4, 0x45, 0x27, 0x64, 0x84, 0xaa, - 0x67, 0x7c, 0x54, 0x9c, 0x7d, 0x46, 0x28, 0x92, 0x08, 0x34, 0xc1, 0x4c, 0x3b, 0x7e, 0xdd, 0xe2, - 0xf7, 0x07, 0x88, 0x04, 0xab, 0xa7, 0x4d, 0x21, 0xb0, 0x05, 0x8a, 0x44, 0xec, 0xad, 0x46, 0x71, - 0x35, 0xbf, 0x56, 0xa9, 0x7f, 0xf0, 0x6f, 0x3a, 0xc3, 0x92, 0x9b, 0xef, 0x8e, 0xc7, 0xe9, 0x49, - 0xb2, 0x4e, 0x48, 0x19, 0x8a, 0x8d, 0xc3, 0xff, 0x83, 0x7c, 0xe8, 0xb6, 0xd4, 0x6b, 0x5f, 0x51, - 0x94, 0xfc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x15, 0xac, 0x96, 0x67, 0x69, 0x02, 0x2e, 0x80, 0xfc, - 0x11, 0x39, 0x89, 0x07, 0x0a, 0x89, 0x4f, 0xf8, 0x10, 0x14, 0xfb, 0x62, 0xaf, 0x56, 0xf9, 0x7d, - 0x67, 0x6a, 0x90, 0xc9, 0x1a, 0x8e, 0x62, 0xad, 0x07, 0xfa, 0xa6, 0x66, 0xfe, 0xae, 0x81, 0x9b, - 0x13, 0xdb, 0x4f, 0xac, 0x3b, 0xb8, 0xdb, 0xf5, 0x8f, 0x49, 0x4b, 0xba, 0x2d, 0x25, 0xeb, 0xce, - 0x56, 0x2c, 0x46, 0x43, 0x1c, 0xbe, 0x0d, 0x66, 0x28, 0xc1, 0xcc, 0xf7, 0xd4, 0x8a, 0x35, 0xea, - 0x5c, 0x24, 0xa5, 0x48, 0xa1, 0x70, 0x0b, 0xcc, 0x13, 0xe1, 0x5e, 0xc6, 0xb5, 0x43, 0xa9, 0x3f, - 0xac, 0xd4, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0xbe, 0x70, 0xd5, 0x22, 0x9e, 0x4b, - 0x5a, 0x72, 0x07, 0x2b, 0x25, 0xae, 0xb6, 0xa5, 0x14, 0x29, 0xd4, 0xfc, 0x5b, 0x07, 0xc6, 0xa4, - 0xab, 0x0d, 0x1e, 0x26, 0xbb, 0x88, 0x04, 0xe5, 0x3a, 0x54, 0xa9, 0xdf, 0xbe, 0xd4, 0x80, 0x08, - 0x8d, 0xc6, 0x92, 0x72, 0x3b, 0x97, 0x96, 0xa6, 0x56, 0x17, 0xf9, 0x0b, 0x29, 0x58, 0xf0, 0xc6, - 0x77, 0xe6, 0x78, 0xa9, 0xaa, 0xd4, 0xef, 0x5c, 0x76, 0x1c, 0xa4, 0x37, 0x43, 0x79, 0x5b, 0x38, - 0x07, 0x30, 0x74, 0xc1, 0x3e, 0xac, 0x03, 0xe0, 0x7a, 0x8e, 0xdf, 0x0b, 0xba, 0x84, 0x13, 0x99, - 0xde, 0x52, 0x72, 0x0f, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xd5, 0xa5, 0x70, 0xb5, 0xba, 0x34, - 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, - 0x4e, 0xa3, 
0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, - 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x85, 0x45, 0x74, - 0x47, 0x0f, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index ea4f802e..7046f111 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io + package v1beta1 // import "k8s.io/api/authorization/v1beta1" diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 75ee6cf9..f0def20b 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -14,44 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -64,74 +44,398 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_43130d8376f09103, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var 
xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{5} +} +func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_43130d8376f09103, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) } -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_43130d8376f09103, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) } -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_43130d8376f09103, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_43130d8376f09103, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) +} +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() } +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_43130d8376f09103, []int{13} +} 
+func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) } +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") @@ -145,13 +449,95 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_43130d8376f09103) +} + +var fileDescriptor_43130d8376f09103 = []byte{ + // 1141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, + 0x05, 0xd1, 0xee, 0x92, 0xa8, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, 0x44, + 0xc9, 0x81, 0x4a, 0xc0, 0xec, 0x7a, 0x62, 0x2f, 0xb6, 0x77, 0x97, 0x99, 0x59, 0x87, 0x20, 0x0e, + 0x3d, 0x72, 0xe4, 0xc8, 0x91, 0x13, 0xdf, 0x81, 0x0b, 0x12, 0x9c, 0x72, 0xec, 0x31, 0x48, 0xc8, + 0x22, 0xcb, 0x87, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, + 0xdb, 0x79, 0xbf, 0xdf, 0x7b, 0xf3, 0xde, 0x9b, 0xf7, 0xde, 0x3e, 0xb0, 0xdb, 0x79, 0xc8, 0x0c, + 0xc7, 0x33, 0x3b, 0x81, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0x66, 0x9f, 0xb8, 0x4d, 0x8f, 0x9a, 0x0a, + 0xc0, 0xbe, 0x63, 0xe2, 0x80, 0xb7, 0x3d, 0xea, 0x7c, 0x87, 0xb9, 0xe3, 0xb9, 0x66, 0x7f, 0xcd, + 0x22, 0x1c, 0xaf, 0x99, 0x2d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x1a, 0x3e, 0xf5, 0xb8, 0x07, 0x6b, + 0x91, 0x86, 0x81, 0x7d, 0xc7, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xca, 0xfd, 0x96, 0xc3, 0xdb, 0x81, + 0x65, 0xd8, 0x5e, 0xcf, 0x6c, 0x79, 0x2d, 0xcf, 0x94, 0x8a, 0x56, 0x70, 0x28, 0x4f, 0xf2, 0x20, + 0xbf, 0x22, 0x83, 0x2b, 0x0f, 0x62, 0x17, 0x7a, 0xd8, 0x6e, 0x3b, 0x2e, 0xa1, 0xc7, 0xa6, 0xdf, + 0x69, 0x09, 0x01, 0x33, 0x7b, 0x84, 0x63, 0xb3, 0x7f, 0xc1, 0x8d, 0x15, 0xf3, 0x32, 0x2d, 0x1a, + 0xb8, 0xdc, 0xe9, 0x91, 0x0b, 0x0a, 0x1f, 0x4e, 0x52, 0x60, 0x76, 0x9b, 0xf4, 0xf0, 0x79, 0xbd, + 0xfa, 0x06, 0x00, 0x3b, 0xdf, 0x72, 0x8a, 0x0f, 0x70, 0x37, 0x20, 0xb0, 0x0a, 0x0a, 0x0e, 0x27, + 0x3d, 0xa6, 0x6b, 0xb5, 0xdc, 0x6a, 0xa9, 0x51, 0x0a, 0x07, 0xd5, 0xc2, 0xae, 0x10, 
0xa0, 0x48, + 0xbe, 0x59, 0xfc, 0xe9, 0xe7, 0x6a, 0xe6, 0xc5, 0x5f, 0xb5, 0x4c, 0xfd, 0xb7, 0x2c, 0xd0, 0x1f, + 0x7b, 0x36, 0xee, 0xee, 0x05, 0xd6, 0xd7, 0xc4, 0xe6, 0x5b, 0xb6, 0x4d, 0x18, 0x43, 0xa4, 0xef, + 0x90, 0x23, 0xf8, 0x15, 0x28, 0x8a, 0xc8, 0x9a, 0x98, 0x63, 0x5d, 0xab, 0x69, 0xab, 0xe5, 0xf5, + 0xf7, 0x8d, 0x38, 0xb1, 0x23, 0x07, 0x0d, 0xbf, 0xd3, 0x12, 0x02, 0x66, 0x08, 0xb6, 0xd1, 0x5f, + 0x33, 0x3e, 0x93, 0xb6, 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, + 0x65, 0x68, 0x64, 0x15, 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xeb, 0x1f, 0x19, 0x93, + 0x9e, 0xcd, 0x48, 0x71, 0x73, 0xcf, 0x27, 0x76, 0xe3, 0x96, 0xba, 0x26, 0x2f, 0x4e, 0x48, 0x1a, + 0x85, 0x36, 0x98, 0x61, 0x1c, 0xf3, 0x80, 0xe9, 0x39, 0x69, 0xfe, 0xe3, 0x9b, 0x99, 0x97, 0x26, + 0x1a, 0x6f, 0xa8, 0x0b, 0x66, 0xa2, 0x33, 0x52, 0xa6, 0xeb, 0xcf, 0xc1, 0xd2, 0x53, 0xcf, 0x45, + 0x84, 0x79, 0x01, 0xb5, 0xc9, 0x16, 0xe7, 0xd4, 0xb1, 0x02, 0x4e, 0x18, 0xac, 0x81, 0xbc, 0x8f, + 0x79, 0x5b, 0x26, 0xae, 0x14, 0xfb, 0xf7, 0x0c, 0xf3, 0x36, 0x92, 0x88, 0x60, 0xf4, 0x09, 0xb5, + 0x64, 0xf0, 0x09, 0xc6, 0x01, 0xa1, 0x16, 0x92, 0x48, 0xfd, 0x1b, 0x30, 0x9f, 0x30, 0x8e, 0x82, + 0xae, 0x7c, 0x5b, 0x01, 0x8d, 0xbd, 0xad, 0xd0, 0x60, 0x28, 0x92, 0xc3, 0x47, 0x60, 0xde, 0x8d, + 0x75, 0xf6, 0xd1, 0x63, 0xa6, 0x67, 0x25, 0x75, 0x31, 0x1c, 0x54, 0x93, 0xe6, 0x04, 0x84, 0xce, + 0x73, 0x45, 0x41, 0xc0, 0x94, 0x68, 0x4c, 0x50, 0x72, 0x71, 0x8f, 0x30, 0x1f, 0xdb, 0x44, 0x85, + 0x74, 0x5b, 0x39, 0x5c, 0x7a, 0x3a, 0x04, 0x50, 0xcc, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, + 0xd4, 0x0b, 0x7c, 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0xa2, 0x14, 0x3e, 0x15, 0x42, 0x14, 0x61, 0xf0, + 0x5d, 0x30, 0xdb, 0x27, 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0x6d, 0x5e, 0xd1, 0x66, 0x0f, 0x22, + 0x31, 0x1a, 0xe2, 0xf0, 0x1e, 0x28, 0x52, 0xe5, 0xb8, 0x5e, 0x90, 0xdc, 0x05, 0xc5, 0x2d, 0x8e, + 0x32, 0x38, 0x62, 0xc0, 0x0f, 0x40, 0x99, 0x05, 0xd6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, + 0xf2, 0x5e, 0x0c, 0xa1, 0x24, 0x4f, 0x84, 0x25, 0x62, 0xd4, 0x67, 0xc7, 0xc3, 0x12, 0x29, 0x40, + 0x12, 0xa9, 0xff, 0xa1, 0x81, 0x5b, 0xd3, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x86, 0x3d, + 0x7c, 0xab, 0x39, 0x91, 0xd7, 0xad, 0x67, 0xbb, 0x91, 0x10, 0xc5, 0xb8, 0x20, 0x0f, 0x9d, 0x11, + 0x75, 0x3d, 0x22, 0x0f, 0xaf, 0x64, 0x28, 0xc6, 0xe1, 0x06, 0x98, 0x1b, 0x1e, 0xe4, 0x23, 0xe9, + 0x79, 0xa9, 0x70, 0x3b, 0x1c, 0x54, 0xe7, 0x50, 0x12, 0x40, 0xe3, 0xbc, 0xfa, 0xef, 0x59, 0xb0, + 0xbc, 0x47, 0xba, 0x87, 0xaf, 0x66, 0x2a, 0x7c, 0x39, 0x36, 0x15, 0x1e, 0x5d, 0xa3, 0x6d, 0xd3, + 0x5d, 0x7d, 0xb5, 0x93, 0xe1, 0x97, 0x2c, 0x78, 0xf3, 0x0a, 0xc7, 0xe0, 0xf7, 0x00, 0xd2, 0x0b, + 0x8d, 0xa6, 0x32, 0xfa, 0x60, 0xb2, 0x43, 0x17, 0x9b, 0xb4, 0x71, 0x27, 0x1c, 0x54, 0x53, 0x9a, + 0x17, 0xa5, 0xdc, 0x03, 0x7f, 0xd0, 0xc0, 0x92, 0x9b, 0x36, 0xb8, 0x54, 0xd6, 0x37, 0x26, 0x7b, + 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xa2, 0xf4, 0x0b, 0xc5, 0xc8, 0xb9, + 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xaf, 0xd6, 0xbe, 0x18, 0xab, 0xb5, 0x4f, 0xa6, 0xaa, 0xb5, + 0x84, 0xa7, 0x97, 0x96, 0x9a, 0x75, 0xae, 0xd4, 0x36, 0xaf, 0x5d, 0x6a, 0x49, 0xeb, 0x57, 0x57, + 0xda, 0x13, 0xb0, 0x72, 0xb9, 0x57, 0x53, 0x8f, 0xee, 0xfa, 0xaf, 0x59, 0xb0, 0xf8, 0x7a, 0x1d, + 0xb8, 0x59, 0xd3, 0x9f, 0xe6, 0xc1, 0xf2, 0xeb, 0x86, 0xbf, 0xba, 0xe1, 0xc5, 0x4f, 0x34, 0x60, + 0x84, 0xaa, 0x1f, 0xff, 0xe8, 0xad, 0xf6, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x36, 0xdc, 0x0d, 0xa2, + 0x1f, 0x16, 0x10, 0x99, 0x56, 0xff, 0x42, 0xb5, 0x18, 0x38, 0xa0, 0x40, 0xc4, 0xc6, 0xab, 0x17, + 0x6a, 0xb9, 
0xd5, 0xf2, 0xfa, 0xf6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, 0x8e, 0xcb, 0xe9, 0x71, + 0xbc, 0x83, 0x48, 0x19, 0x8a, 0x6e, 0x80, 0x6f, 0x81, 0x5c, 0xe0, 0x34, 0xd5, 0x8a, 0x50, 0x56, + 0x94, 0xdc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, + 0x3a, 0xe4, 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0xfa, 0x62, 0x2d, 0x57, 0x79, 0xbe, + 0x37, 0xd9, 0xd3, 0x78, 0x95, 0x47, 0x91, 0xea, 0x66, 0xf6, 0xa1, 0x56, 0xff, 0x53, 0x03, 0x77, + 0x2f, 0x2d, 0x48, 0xb1, 0x28, 0xe1, 0x6e, 0xd7, 0x3b, 0x22, 0x4d, 0x79, 0x77, 0x31, 0x5e, 0x94, + 0xb6, 0x22, 0x31, 0x1a, 0xe2, 0xf0, 0x1d, 0x30, 0xd3, 0x24, 0xae, 0x43, 0x9a, 0x72, 0xa5, 0x2a, + 0xc6, 0xb5, 0xbc, 0x2d, 0xa5, 0x48, 0xa1, 0x82, 0x47, 0x09, 0x66, 0x9e, 0xab, 0x96, 0xb8, 0x11, + 0x0f, 0x49, 0x29, 0x52, 0x28, 0xdc, 0x02, 0xf3, 0x44, 0xb8, 0x29, 0x83, 0xd8, 0xa1, 0xd4, 0x1b, + 0xbe, 0xec, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0x7e, 0xfd, 0xdf, 0x2c, 0xd0, 0x2f, + 0x1b, 0x7b, 0xb0, 0x13, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xdd, 0xb8, 0x7e, 0xcb, 0x08, + 0xb5, 0xc6, 0x92, 0xf2, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, + 0xbe, 0x72, 0x47, 0x3b, 0x59, 0x79, 0x7d, 0x6d, 0xaa, 0x06, 0x91, 0x57, 0xea, 0xea, 0xca, 0x85, + 0x73, 0x00, 0x43, 0x17, 0x2e, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, + 0x26, 0xb0, 0x18, 0x4f, 0xcb, 0xdd, 0x11, 0x82, 0x12, 0xac, 0xb4, 0xcc, 0xe7, 0xa7, 0xcb, 0x7c, + 0xe3, 0xfe, 0xc9, 0x59, 0x25, 0xf3, 0xf2, 0xac, 0x92, 0x39, 0x3d, 0xab, 0x64, 0x5e, 0x84, 0x15, + 0xed, 0x24, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x1a, 0x56, 0xb4, 0xbf, 0xc3, 0x8a, 0xf6, 0xe3, + 0x3f, 0x95, 0xcc, 0xe7, 0xb3, 0x2a, 0xc2, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc9, 0xa5, + 0x34, 0xa4, 0x0f, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -159,32 +545,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -192,41 +577,52 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -234,25 +630,32 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -260,47 +663,40 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -308,45 +704,57 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -354,77 +762,58 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - 
for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -432,41 +821,52 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -474,37 +874,46 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -512,41 +921,52 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -554,21 +974,27 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -576,41 +1002,52 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -618,91 +1055,94 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) } - i += n15 - } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } - i += n16 } - dAtA[i] = 0x1a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) if len(m.Groups) > 0 { - for _, s := range m.Groups { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n17 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + return len(dAtA) - i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -710,41 +1150,48 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { + i-- + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) + i-- dAtA[i] = 0x20 - i++ - if m.Denied { + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -752,77 +1199,74 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -835,6 +1279,9 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -847,6 +1294,9 @@ func (m *LocalSubjectAccessReview) Size() (n int) { } func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -857,6 +1307,9 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -875,6 +1328,9 @@ func (m 
*NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -895,6 +1351,9 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -925,6 +1384,9 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -937,6 +1399,9 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -951,6 +1416,9 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -963,6 +1431,9 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -971,6 +1442,9 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -983,6 +1457,9 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -1016,6 +1493,9 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1028,6 +1508,9 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1049,14 +1532,7 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1066,7 +1542,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1129,7 +1605,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), 
`&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1141,8 +1617,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1152,7 +1628,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1174,7 +1650,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1196,8 +1672,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Extra:` + mapStringForExtra + `,`, @@ -1223,9 +1699,19 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", 
"ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1255,7 +1741,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1283,7 +1769,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1293,6 +1779,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1307,6 +1796,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1334,7 +1826,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1362,7 +1854,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1371,6 +1863,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1392,7 +1887,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1401,6 +1896,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1422,7 +1920,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1431,6 +1929,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1447,6 +1948,9 @@ func (m 
*LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1474,7 +1978,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1502,7 +2006,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1512,6 +2016,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1531,7 +2038,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1541,6 +2048,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1555,6 +2065,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1582,7 +2095,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1610,7 +2123,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1620,6 +2133,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1639,7 +2155,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1649,6 +2165,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1663,6 +2182,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1690,7 +2212,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1718,7 +2240,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1728,6 +2250,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1747,7 +2272,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1757,6 +2282,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1776,7 +2304,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1786,6 +2314,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1805,7 +2336,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1815,6 +2346,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1834,7 +2368,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1844,6 +2378,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1863,7 +2400,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1873,6 +2410,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1892,7 +2432,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1902,6 +2442,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1916,6 +2459,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1943,7 +2489,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1971,7 +2517,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1981,6 +2527,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2000,7 +2549,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2010,6 +2559,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2029,7 +2581,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2039,6 +2591,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2058,7 +2613,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2068,6 +2623,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2082,6 +2640,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2109,7 +2670,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2137,7 +2698,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2146,6 +2707,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2167,7 +2731,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2176,6 +2740,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2197,7 +2764,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2206,6 +2773,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -2222,6 +2792,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2249,7 +2822,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2277,7 +2850,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2286,6 +2859,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2310,7 +2886,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2319,6 +2895,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2338,6 +2917,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2365,7 +2947,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2393,7 +2975,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2402,6 +2984,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2423,7 +3008,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2432,6 +3017,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2453,7 +3041,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2462,6 +3050,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2478,6 +3069,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -2505,7 +3099,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2533,7 +3127,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2543,6 +3137,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2557,6 +3154,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2584,7 +3184,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2612,7 +3212,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2621,6 +3221,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2642,7 +3245,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2651,6 +3254,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2672,7 +3278,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2681,6 +3287,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2697,6 +3306,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2724,7 +3336,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2752,7 +3364,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2761,6 +3373,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2785,7 +3400,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2794,6 +3409,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2818,7 +3436,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2828,6 +3446,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2847,7 +3468,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2857,6 +3478,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2876,7 +3500,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2885,54 +3509,20 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2942,46 +3532,88 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -2997,7 +3629,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3007,6 +3639,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3021,6 +3656,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3048,7 +3686,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3076,7 +3714,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3096,7 +3734,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3106,6 +3744,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3125,7 +3766,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3135,6 +3776,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3154,7 +3798,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3169,6 +3813,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3196,7 +3843,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3224,7 +3871,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3233,6 +3880,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3255,7 +3905,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3264,6 +3914,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3286,7 +3939,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3306,7 +3959,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3316,6 +3969,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3330,6 +3986,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3396,10 +4055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3428,6 +4090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, 
ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3446,83 +4111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcb, 0x6f, 0x1b, 0x45, - 0x18, 0xf7, 0xfa, 0x91, 0xd8, 0xe3, 0x86, 0xa4, 0x13, 0xa5, 0xd9, 0x06, 0x61, 0x5b, 0x46, 0x42, - 0x41, 0xb4, 0xbb, 0x24, 0x2a, 0xa4, 0x04, 0x7a, 0x88, 0x95, 0x08, 0x45, 0x6a, 0x4b, 0x35, 0x51, - 0x72, 0xa0, 0x12, 0x30, 0xbb, 0x9e, 0xd8, 0x8b, 0xed, 0xdd, 0x65, 0x66, 0xd6, 0x21, 0x88, 0x43, - 0x8f, 0x1c, 0x39, 0x72, 0xe4, 0xc4, 0xff, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, 0x64, - 0x91, 0xe5, 0x8f, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, - 0xdb, 0xf9, 0x7e, 0xdf, 0xfb, 0xb5, 0x1f, 0xd8, 0xed, 0x3c, 0x64, 0x86, 0xe3, 0x99, 0x9d, 0xc0, - 0x22, 0xd4, 0x25, 0x9c, 0x30, 0xb3, 0x4f, 0xdc, 0xa6, 0x47, 0x4d, 0x05, 0x60, 0xdf, 0x31, 0x71, - 0xc0, 0xdb, 0x1e, 0x75, 0xbe, 0xc3, 0xdc, 0xf1, 0x5c, 0xb3, 0xbf, 0x66, 0x11, 0x8e, 0xd7, 0xcc, - 0x16, 0x71, 0x09, 0xc5, 0x9c, 0x34, 0x0d, 0x9f, 0x7a, 0xdc, 0x83, 0xb5, 0x48, 0xc2, 0xc0, 0xbe, - 0x63, 0x8c, 0x49, 0x18, 0x4a, 0x62, 0xe5, 0x7e, 0xcb, 0xe1, 0xed, 0xc0, 0x32, 0x6c, 0xaf, 0x67, - 0xb6, 0xbc, 0x96, 0x67, 0x4a, 0x41, 0x2b, 0x38, 0x94, 0x2f, 0xf9, 0x90, 0x5f, 0x91, 0xc2, 0x95, - 0x07, 0xb1, 0x0b, 0x3d, 0x6c, 0xb7, 0x1d, 0x97, 0xd0, 0x63, 0xd3, 0xef, 0xb4, 0x04, 0x81, 0x99, - 0x3d, 0xc2, 0xb1, 0xd9, 0xbf, 0xe0, 0xc6, 0x8a, 0x79, 0x99, 0x14, 0x0d, 0x5c, 0xee, 0xf4, 0xc8, - 0x05, 0x81, 0x0f, 0x27, 0x09, 0x30, 0xbb, 0x4d, 0x7a, 0xf8, 0xbc, 0x5c, 0x7d, 0x03, 0x80, 0x9d, - 0x6f, 0x39, 0xc5, 0x07, 0xb8, 0x1b, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x1e, 0xd3, 0xb5, 0x5a, - 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0x83, 0x6a, 0x61, 0x57, 0x10, 0x50, 0x44, 0xdf, 0x2c, 0xfe, 0xf4, - 0x73, 0x35, 0xf3, 0xe2, 0xaf, 0x5a, 0xa6, 0xfe, 0x5b, 0x16, 0xe8, 0x8f, 0x3d, 0x1b, 0x77, 0xf7, - 0x02, 0xeb, 0x6b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x77, 0xc8, 0x11, 0xfc, 0x0a, - 0x14, 0x45, 0x64, 0x4d, 0xcc, 0xb1, 0xae, 0xd5, 0xb4, 0xd5, 0xf2, 0xfa, 0xfb, 0x46, 0x9c, 0xd8, - 0x91, 0x83, 0x86, 0xdf, 0x69, 0x09, 0x02, 0x33, 0x04, 0xb7, 0xd1, 0x5f, 0x33, 0x3e, 0x93, 0xba, - 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, 0x69, 0x68, 0xa4, 0x15, - 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xed, 0x1f, 0x19, 0x93, 0xca, 0x66, 0xa4, 0xb8, - 0xb9, 0xe7, 0x13, 0xbb, 0x71, 0x4b, 0x99, 0xc9, 0x8b, 0x17, 0x92, 0x4a, 0xa1, 0x0d, 0x66, 0x18, - 0xc7, 0x3c, 0x60, 0x7a, 0x4e, 0xaa, 0xff, 0xf8, 0x66, 0xea, 0xa5, 0x8a, 0xc6, 0x1b, 0xca, 0xc0, - 0x4c, 0xf4, 0x46, 0x4a, 0x75, 0xfd, 0x39, 0x58, 0x7a, 0xea, 0xb9, 0x88, 0x30, 0x2f, 0xa0, 0x36, - 0xd9, 0xe2, 0x9c, 0x3a, 0x56, 0xc0, 0x09, 0x83, 0x35, 0x90, 0xf7, 0x31, 0x6f, 0xcb, 0xc4, 0x95, - 0x62, 0xff, 0x9e, 0x61, 0xde, 0x46, 0x12, 0x11, 0x1c, 0x7d, 0x42, 0x2d, 0x19, 0x7c, 0x82, 0xe3, - 0x80, 0x50, 0x0b, 0x49, 0xa4, 0xfe, 0x0d, 0x98, 0x4f, 0x28, 0x47, 0x41, 0x57, 0xd6, 0x56, 0x40, - 0x63, 0xb5, 0x15, 0x12, 0x0c, 0x45, 0x74, 0xf8, 0x08, 0xcc, 0xbb, 0xb1, 0xcc, 0x3e, 0x7a, 0xcc, - 0xf4, 0xac, 0x64, 0x5d, 0x0c, 0x07, 0xd5, 0xa4, 0x3a, 0x01, 0xa1, 0xf3, 0xbc, 0xa2, 0x21, 0x60, - 0x4a, 0x34, 0x26, 0x28, 0xb9, 0xb8, 
0x47, 0x98, 0x8f, 0x6d, 0xa2, 0x42, 0xba, 0xad, 0x1c, 0x2e, - 0x3d, 0x1d, 0x02, 0x28, 0xe6, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, 0xd4, 0x0b, 0x7c, 0x59, - 0x9d, 0x52, 0x63, 0x4e, 0xb1, 0x14, 0x3e, 0x15, 0x44, 0x14, 0x61, 0xf0, 0x5d, 0x30, 0xdb, 0x27, - 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0xdb, 0xbc, 0x62, 0x9b, 0x3d, 0x88, 0xc8, 0x68, 0x88, 0xc3, - 0x7b, 0xa0, 0x48, 0x95, 0xe3, 0x7a, 0x41, 0xf2, 0x2e, 0x28, 0xde, 0xe2, 0x28, 0x83, 0x23, 0x0e, - 0xf8, 0x01, 0x28, 0xb3, 0xc0, 0x1a, 0x09, 0xcc, 0x48, 0x81, 0x45, 0x25, 0x50, 0xde, 0x8b, 0x21, - 0x94, 0xe4, 0x13, 0x61, 0x89, 0x18, 0xf5, 0xd9, 0xf1, 0xb0, 0x44, 0x0a, 0x90, 0x44, 0xea, 0x7f, - 0x68, 0xe0, 0xd6, 0x74, 0x15, 0x7b, 0x0f, 0x94, 0xb0, 0xef, 0xc8, 0xb0, 0x87, 0xb5, 0x9a, 0x13, - 0x79, 0xdd, 0x7a, 0xb6, 0x1b, 0x11, 0x51, 0x8c, 0x0b, 0xe6, 0xa1, 0x33, 0xa2, 0xaf, 0x47, 0xcc, - 0x43, 0x93, 0x0c, 0xc5, 0x38, 0xdc, 0x00, 0x73, 0xc3, 0x87, 0x2c, 0x92, 0x9e, 0x97, 0x02, 0xb7, - 0xc3, 0x41, 0x75, 0x0e, 0x25, 0x01, 0x34, 0xce, 0x57, 0xff, 0x3d, 0x0b, 0x96, 0xf7, 0x48, 0xf7, - 0xf0, 0xd5, 0x6c, 0x85, 0x2f, 0xc7, 0xb6, 0xc2, 0xa3, 0x6b, 0x8c, 0x6d, 0xba, 0xab, 0xaf, 0x76, - 0x33, 0xfc, 0x92, 0x05, 0x6f, 0x5e, 0xe1, 0x18, 0xfc, 0x1e, 0x40, 0x7a, 0x61, 0xd0, 0x54, 0x46, - 0x1f, 0x4c, 0x76, 0xe8, 0xe2, 0x90, 0x36, 0xee, 0x84, 0x83, 0x6a, 0xca, 0xf0, 0xa2, 0x14, 0x3b, - 0xf0, 0x07, 0x0d, 0x2c, 0xb9, 0x69, 0x8b, 0x4b, 0x65, 0x7d, 0x63, 0xb2, 0x07, 0xa9, 0x7b, 0xaf, - 0x71, 0x37, 0x1c, 0x54, 0xd3, 0x57, 0x22, 0x4a, 0x37, 0x28, 0x56, 0xce, 0x9d, 0x44, 0xa2, 0xc4, - 0xd0, 0xfc, 0x7f, 0xbd, 0xf6, 0xc5, 0x58, 0xaf, 0x7d, 0x32, 0x55, 0xaf, 0x25, 0x3c, 0xbd, 0xb4, - 0xd5, 0xac, 0x73, 0xad, 0xb6, 0x79, 0xed, 0x56, 0x4b, 0x6a, 0xbf, 0xba, 0xd3, 0x9e, 0x80, 0x95, - 0xcb, 0xbd, 0x9a, 0x7a, 0x75, 0xd7, 0x7f, 0xcd, 0x82, 0xc5, 0xd7, 0xe7, 0xc0, 0xcd, 0x86, 0xfe, - 0x34, 0x0f, 0x96, 0x5f, 0x0f, 0xfc, 0xd5, 0x03, 0x2f, 0x7e, 0xa2, 0x01, 0x23, 0x54, 0xfd, 0xf8, - 0x47, 0xb5, 0xda, 0x67, 0x84, 0x22, 0x89, 0xc0, 0xda, 0xf0, 0x36, 0x88, 0x7e, 0x58, 0x40, 0x64, - 0x5a, 0xfd, 0x0b, 0xd5, 0x61, 0xe0, 0x80, 0x02, 0x11, 0x17, 0xaf, 0x5e, 0xa8, 0xe5, 0x56, 0xcb, - 0xeb, 0xdb, 0x37, 0xee, 0x15, 0x43, 0x1e, 0xce, 0x3b, 0x2e, 0xa7, 0xc7, 0xf1, 0x0d, 0x22, 0x69, - 0x28, 0xb2, 0x00, 0xdf, 0x02, 0xb9, 0xc0, 0x69, 0xaa, 0x13, 0xa1, 0xac, 0x58, 0x72, 0xfb, 0xbb, - 0xdb, 0x48, 0xd0, 0x57, 0x0e, 0xd5, 0xed, 0x2d, 0x55, 0xc0, 0x05, 0x90, 0xeb, 0x90, 0xe3, 0x68, - 0xce, 0x90, 0xf8, 0x84, 0x0d, 0x50, 0xe8, 0x8b, 0xb3, 0x5c, 0xe5, 0xf9, 0xde, 0x64, 0x4f, 0xe3, - 0x53, 0x1e, 0x45, 0xa2, 0x9b, 0xd9, 0x87, 0x5a, 0xfd, 0x4f, 0x0d, 0xdc, 0xbd, 0xb4, 0x21, 0xc5, - 0xa1, 0x84, 0xbb, 0x5d, 0xef, 0x88, 0x34, 0xa5, 0xed, 0x62, 0x7c, 0x28, 0x6d, 0x45, 0x64, 0x34, - 0xc4, 0xe1, 0x3b, 0x60, 0x86, 0x12, 0xcc, 0x3c, 0x57, 0x1d, 0x67, 0xa3, 0x5e, 0x46, 0x92, 0x8a, - 0x14, 0x0a, 0xb7, 0xc0, 0x3c, 0x11, 0xe6, 0xa5, 0x73, 0x3b, 0x94, 0x7a, 0xc3, 0x8a, 0x2d, 0x2b, - 0x81, 0xf9, 0x9d, 0x71, 0x18, 0x9d, 0xe7, 0x17, 0xa6, 0x9a, 0xc4, 0x75, 0x48, 0x53, 0x5e, 0x6f, - 0xc5, 0xd8, 0xd4, 0xb6, 0xa4, 0x22, 0x85, 0xd6, 0xff, 0xcd, 0x02, 0xfd, 0xb2, 0xb5, 0x07, 0x3b, - 0xf1, 0x15, 0x23, 0x41, 0x79, 0x48, 0x95, 0xd7, 0x8d, 0xeb, 0x8f, 0x8c, 0x10, 0x6b, 0x2c, 0x29, - 0xdb, 0x73, 0x49, 0x6a, 0xe2, 0xf2, 0x91, 0x4f, 0x78, 0x04, 0x16, 0xdc, 0xf1, 0x93, 0x3b, 0xba, - 0xc9, 0xca, 0xeb, 0x6b, 0x53, 0x0d, 0x88, 0x34, 0xa9, 0x2b, 0x93, 0x0b, 0xe7, 0x00, 0x86, 0x2e, - 0x18, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, 0x26, 0xba, 0x18, 0x6f, - 0xcb, 0xdd, 0x11, 0x82, 0x12, 0x5c, 0x69, 0x15, 0xca, 0x4f, 
0x57, 0xa1, 0xc6, 0xfd, 0x93, 0xb3, - 0x4a, 0xe6, 0xe5, 0x59, 0x25, 0x73, 0x7a, 0x56, 0xc9, 0xbc, 0x08, 0x2b, 0xda, 0x49, 0x58, 0xd1, - 0x5e, 0x86, 0x15, 0xed, 0x34, 0xac, 0x68, 0x7f, 0x87, 0x15, 0xed, 0xc7, 0x7f, 0x2a, 0x99, 0xcf, - 0x67, 0x55, 0x84, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xba, 0xf8, 0x96, 0xa4, 0x0f, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 9c3be845..8c9c09b5 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 47a46a55..174e6f5f 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -14,52 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus - Scale - ScaleSpec - ScaleStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -75,114 +50,664 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_2bb1f2101a7f10e2, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_2bb1f2101a7f10e2, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() } +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_2bb1f2101a7f10e2, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_2bb1f2101a7f10e2, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_2bb1f2101a7f10e2, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2bb1f2101a7f10e2, []int{10} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, 
[]int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{16} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{17} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{18} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_2bb1f2101a7f10e2) +} + +var fileDescriptor_2bb1f2101a7f10e2 = []byte{ + // 1516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, + 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, + 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, + 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, + 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, + 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, + 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, + 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, + 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, + 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, + 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 
0xf9, 0x56, 0x6b, 0xd3, 0xb2, + 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, + 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, + 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, + 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, + 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, + 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, + 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, + 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, + 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, + 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, + 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, + 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, + 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, + 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, + 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, + 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, + 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, + 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, + 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, + 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, + 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, + 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, + 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, + 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, + 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, + 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, + 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, + 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, + 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, + 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, + 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, + 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, + 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, + 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, + 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, + 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 
0x34, + 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, + 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, + 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, + 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, + 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, + 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, + 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, + 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, + 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, + 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, + 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, + 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, + 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, + 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, + 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, + 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, + 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, + 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, + 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, + 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, + 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, + 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, + 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, + 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, + 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, + 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, + 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, + 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, + 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, + 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, + 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, + 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, + 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, + 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, + 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, + 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, + 0xde, 0x1d, 0x30, 
0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, + 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, + 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, + 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, + 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, + 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, + 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, + 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, + 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, + 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, + 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, + 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*ExternalMetricSource)(nil), 
"k8s.io.api.autoscaling.v1.ExternalMetricSource") - proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") - proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") -} func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -190,29 +715,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -220,51 +753,63 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x22 } if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0x1a } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -272,49 +817,61 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n4 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -322,41 +879,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -364,41 +932,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := 
m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -406,37 +985,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -444,38 +1032,45 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x20 } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if m.TargetCPUUtilizationPercentage != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -483,43 +1078,50 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + if m.CurrentCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x28 } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -527,61 +1129,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -589,61 +1205,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return 
nil, err } @@ -651,57 +1281,71 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n23 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n24 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n25 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -709,57 +1353,71 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n27 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + 
i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n29 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -767,39 +1425,49 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -807,39 +1475,49 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n33 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -847,36 +1525,44 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -884,34 +1570,42 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 } - i += n35 
- return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -919,41 +1613,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -961,20 +1666,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -982,48 +1692,41 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x12 - i++ + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], m.Selector) - return i, nil + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = 
uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -1036,6 +1739,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1056,6 +1762,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1074,6 +1783,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1086,6 +1798,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1102,6 +1817,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1116,6 +1834,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1131,6 +1852,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1149,6 +1873,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1173,6 +1900,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1197,6 +1927,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1217,6 +1950,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1237,6 +1973,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1251,6 +1990,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1265,6 +2007,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1280,6 +2025,9 @@ func (m *ResourceMetricSource) Size() (n int) { } 
func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1293,6 +2041,9 @@ func (m *ResourceMetricStatus) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1305,6 +2056,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1312,6 +2066,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1321,14 +2078,7 @@ func (m *ScaleStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1351,9 +2101,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1364,9 +2114,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1376,7 +2126,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, 
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1390,7 +2140,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1401,9 +2151,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1427,7 +2182,7 @@ func (this *HorizontalPodAutoscalerStatus) String() string { } s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, @@ -1441,10 +2196,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", 
"ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1455,10 +2210,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1470,9 +2225,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1484,9 +2239,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", 
this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1497,8 +2252,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1509,8 +2264,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1522,7 +2277,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1534,7 +2289,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1544,7 +2299,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1595,7 +2350,7 @@ func (m 
*CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1623,7 +2378,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1633,6 +2388,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1652,7 +2410,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1662,6 +2420,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1681,7 +2442,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1691,6 +2452,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1705,6 +2469,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1732,7 +2499,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1760,7 +2527,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1770,6 +2537,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1789,7 +2559,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1798,11 +2568,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1822,7 +2595,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1831,11 +2604,14 @@ func (m *ExternalMetricSource) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetValue = &resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1855,7 +2631,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1864,11 +2640,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1883,6 +2662,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1910,7 +2692,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1938,7 +2720,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1948,6 +2730,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1967,7 +2752,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1976,11 +2761,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2000,7 +2788,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2009,6 +2797,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2030,7 +2821,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2039,11 +2830,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2058,6 +2852,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2085,7 +2882,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2113,7 +2910,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2122,6 +2919,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2143,7 +2943,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2152,6 +2952,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2173,7 +2976,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2182,6 +2985,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2198,6 +3004,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2225,7 +3034,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2253,7 +3062,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2263,6 +3072,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2282,7 +3094,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2292,6 +3104,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2311,7 +3126,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2320,6 +3135,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2341,7 +3159,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2351,6 +3169,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2370,7 +3191,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2380,6 +3201,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2394,6 +3218,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2421,7 +3248,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2449,7 +3276,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2458,6 +3285,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2479,7 +3309,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2488,6 +3318,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2505,6 +3338,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2532,7 +3368,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2560,7 +3396,7 @@ func (m 
*HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2569,6 +3405,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2590,7 +3429,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2610,7 +3449,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2629,7 +3468,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2644,6 +3483,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2671,7 +3513,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2699,7 +3541,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2719,7 +3561,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2728,11 +3570,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2752,7 +3597,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2771,7 +3616,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2790,7 +3635,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2805,6 +3650,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2832,7 +3680,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2860,7 +3708,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2870,6 +3718,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2889,7 +3740,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2898,6 +3749,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2922,7 +3776,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2931,6 +3785,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2955,7 +3812,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2964,6 +3821,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2988,7 +3848,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2997,6 +3857,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3016,6 +3879,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3043,7 +3909,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3071,7 +3937,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3081,6 +3947,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3100,7 +3969,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3109,6 +3978,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -3133,7 +4005,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3142,6 +4014,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3166,7 +4041,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3175,6 +4050,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3199,7 +4077,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3208,6 +4086,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3227,6 +4108,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3254,7 +4138,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3282,7 +4166,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3291,6 +4175,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3312,7 +4199,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3322,6 +4209,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3341,7 +4231,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3350,6 +4240,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3371,7 +4264,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3380,11 +4273,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3404,7 +4300,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3413,11 +4309,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3432,6 +4331,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3459,7 +4361,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3487,7 +4389,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3496,6 +4398,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3517,7 +4422,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3527,6 +4432,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3546,7 +4454,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3555,6 +4463,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3576,7 +4487,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3585,11 +4496,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3609,7 +4523,7 @@ func (m *ObjectMetricStatus) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3618,11 +4532,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3637,6 +4554,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3664,7 +4584,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3692,7 +4612,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3702,6 +4622,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3721,7 +4644,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3730,6 +4653,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3751,7 +4677,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3760,11 +4686,14 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3779,6 +4708,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3806,7 +4738,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3834,7 +4766,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3844,6 +4776,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -3863,7 +4798,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3872,6 +4807,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3893,7 +4831,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3902,11 +4840,14 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3921,6 +4862,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3948,7 +4892,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3976,7 +4920,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3986,6 +4930,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4005,7 +4952,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4025,7 +4972,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4034,11 +4981,14 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4053,6 +5003,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4080,7 +5033,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4108,7 +5061,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4118,6 +5071,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4137,7 +5093,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4157,7 +5113,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4166,6 +5122,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4182,6 +5141,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4209,7 +5171,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4237,7 +5199,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4246,6 +5208,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4267,7 +5232,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4276,6 +5241,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4297,7 +5265,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4306,6 +5274,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4322,6 +5293,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4349,7 +5323,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4377,7 +5351,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4391,6 +5365,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4418,7 +5395,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4446,7 +5423,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4465,7 +5442,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4475,6 +5452,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4489,6 +5469,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4555,10 +5538,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4587,6 +5573,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4605,106 +5594,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 
0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 
0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 
0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 5b56b2ac..f50ed9d1 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -88,11 +88,11 @@ message ExternalMetricStatus { // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -141,7 +141,11 @@ message HorizontalPodAutoscalerSpec { // and will set the desired number of pods by using its Scale subresource. optional CrossVersionObjectReference scaleTargetRef = 1; - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; @@ -380,15 +384,15 @@ message ResourceMetricStatus { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index c03af13a..55b2a0d6 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,7 +38,11 @@ type HorizontalPodAutoscalerSpec struct { // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption // and will set the desired number of pods by using its Scale subresource. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. @@ -78,11 +82,11 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
// +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -109,15 +113,15 @@ type HorizontalPodAutoscalerList struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -147,7 +151,7 @@ type ScaleStatus struct { // MetricSourceType indicates the type of metric. type MetricSourceType string -var ( +const ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -318,7 +322,7 @@ type MetricStatus struct { // a HorizontalPodAutoscaler. type HorizontalPodAutoscalerConditionType string -var ( +const ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index a6e874f3..129ce2b4 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", - "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } @@ -196,8 +196,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -207,8 +207,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } @@ -219,9 +219,9 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index 3fda47d5..ddb60112 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index da9789e5..2cc9f11e 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index bee94129..0b6ed381 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -14,49 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto -// DO NOT EDIT! -/* - Package v2beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ package v2beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -72,76 +50,450 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_26c1bfc7a52d0478, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_26c1bfc7a52d0478, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_26c1bfc7a52d0478, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_26c1bfc7a52d0478, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_26c1bfc7a52d0478, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return 
fileDescriptor_26c1bfc7a52d0478, []int{10} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") @@ -161,10 +513,112 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource") 
proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_26c1bfc7a52d0478) +} + +var fileDescriptor_26c1bfc7a52d0478 = []byte{ + // 1475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, + 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, + 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, + 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, + 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, + 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, + 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, + 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, + 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, + 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, + 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, + 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, + 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, + 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, + 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, + 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, + 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, + 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, + 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, + 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, + 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, + 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, + 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, + 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, + 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, + 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, + 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, + 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, + 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, + 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, + 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, + 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, + 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 
0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, + 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, + 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, + 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, + 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, + 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, + 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, + 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, + 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, + 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, + 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, + 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, + 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, + 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, + 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, + 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, + 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, + 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, + 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, + 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, + 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, + 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, + 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, + 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, + 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, + 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, + 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, + 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, + 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, + 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, + 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, + 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, + 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, + 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, + 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, + 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, + 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 
0x1f, 0x87, + 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, + 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, + 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, + 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, + 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, + 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, + 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, + 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, + 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, + 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, + 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, + 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, + 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, + 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, + 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, + 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, + 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, + 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, + 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, + 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, + 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, + 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, + 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, + 0x18, 0x00, 0x00, +} + func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -172,29 +626,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -202,51 +664,63 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x22 } if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0x1a } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -254,49 +728,61 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n4 + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -304,41 +790,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -346,41 +843,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := 
m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -388,37 +896,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -426,45 +943,54 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0x22 + } } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -472,62 +998,73 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n13 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -535,61 +1072,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -597,61 +1148,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -659,57 +1224,71 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n23 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n24 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n25 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- 
+ dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -717,57 +1296,71 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n27 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n29 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -775,39 +1368,49 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -815,39 +1418,49 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n33 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -855,36 +1468,44 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -892,58 +1513,53 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 } - i += n35 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -956,6 +1572,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -976,6 +1595,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -994,6 +1616,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1006,6 +1631,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1022,6 +1650,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ 
-1036,6 +1667,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1054,6 +1688,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1081,6 +1718,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1105,6 +1745,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1129,6 +1772,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1149,6 +1795,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1169,6 +1818,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1183,6 +1835,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1197,6 +1852,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1212,6 +1870,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1225,14 +1886,7 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1255,9 +1909,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1268,9 +1922,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + 
`,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1280,7 +1934,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1294,7 +1948,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1305,9 +1959,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1316,11 +1975,16 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, 
`ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, `}`, }, "") return s @@ -1329,13 +1993,23 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1346,10 +2020,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1360,10 +2034,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - 
`Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1375,9 +2049,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1389,9 +2063,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1402,8 +2076,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + 
strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1414,8 +2088,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1427,7 +2101,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1439,7 +2113,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1467,7 +2141,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1495,7 +2169,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1505,6 +2179,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1524,7 +2201,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1534,6 +2211,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1553,7 +2233,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1563,6 +2243,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1574,7 +2257,10 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1604,7 +2290,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1632,7 +2318,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1642,6 +2328,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1661,7 +2350,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1670,11 +2359,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1694,7 +2386,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1703,11 +2395,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetValue = &resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1727,7 +2422,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1736,11 +2431,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := 
m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1755,6 +2453,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1782,7 +2483,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1810,7 +2511,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1820,6 +2521,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1839,7 +2543,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1848,11 +2552,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1872,7 +2579,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1881,6 +2588,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1902,7 +2612,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1911,11 +2621,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1930,6 +2643,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1957,7 +2673,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1985,7 +2701,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-1994,6 +2710,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2015,7 +2734,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2024,6 +2743,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2045,7 +2767,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2054,6 +2776,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2070,6 +2795,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2097,7 +2825,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2125,7 +2853,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2135,6 +2863,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2154,7 +2885,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2164,6 +2895,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2183,7 +2917,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2192,6 +2926,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2213,7 +2950,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2223,6 +2960,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { 
+ return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2242,7 +2982,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2252,6 +2992,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2266,6 +3009,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2293,7 +3039,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2321,7 +3067,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2330,6 +3076,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2351,7 +3100,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2360,6 +3109,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2377,6 +3129,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2404,7 +3159,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2432,7 +3187,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2441,6 +3196,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2462,7 +3220,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2482,7 +3240,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2501,7 +3259,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2510,6 +3268,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2527,6 +3288,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2554,7 +3318,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2582,7 +3346,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2602,7 +3366,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2611,11 +3375,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2635,7 +3402,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2654,7 +3421,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2673,7 +3440,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2682,6 +3449,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2704,7 +3474,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2713,6 +3483,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2730,6 +3503,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2757,7 +3533,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift 
if b < 0x80 { break } @@ -2785,7 +3561,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2795,6 +3571,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2814,7 +3593,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2823,6 +3602,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2847,7 +3629,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2856,6 +3638,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2880,7 +3665,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2889,6 +3674,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2913,7 +3701,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2922,6 +3710,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2941,6 +3732,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2968,7 +3762,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2996,7 +3790,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3006,6 +3800,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3025,7 +3822,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3034,6 +3831,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -3058,7 +3858,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3067,6 +3867,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3091,7 +3894,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3100,6 +3903,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3124,7 +3930,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3133,6 +3939,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3152,6 +3961,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3179,7 +3991,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3207,7 +4019,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3216,6 +4028,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3237,7 +4052,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3247,6 +4062,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3266,7 +4084,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3275,6 +4093,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3296,7 +4117,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3305,11 +4126,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3329,7 +4153,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3338,11 +4162,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3357,6 +4184,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3384,7 +4214,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3412,7 +4242,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3421,6 +4251,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3442,7 +4275,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3452,6 +4285,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3471,7 +4307,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3480,6 +4316,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3501,7 +4340,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3510,11 +4349,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3534,7 +4376,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3543,11 +4385,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3562,6 +4407,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3589,7 +4437,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3617,7 +4465,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3627,6 +4475,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3646,7 +4497,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3655,6 +4506,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3676,7 +4530,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3685,11 +4539,14 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3704,6 +4561,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3731,7 +4591,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3759,7 +4619,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3769,6 +4629,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-3788,7 +4651,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3797,6 +4660,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3818,7 +4684,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3827,11 +4693,14 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3846,6 +4715,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3873,7 +4745,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3901,7 +4773,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3911,6 +4783,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3930,7 +4805,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3950,7 +4825,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3959,11 +4834,14 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3978,6 +4856,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4005,7 +4886,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4033,7 +4914,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << 
shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4043,6 +4924,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4062,7 +4946,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4082,7 +4966,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4091,6 +4975,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4107,6 +4994,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4173,10 +5063,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4205,6 +5098,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4223,104 +5119,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, - 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, - 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, - 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, - 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, - 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, - 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, - 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 
0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, - 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, - 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, - 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, - 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, - 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, - 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, - 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, - 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, - 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, - 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, - 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, - 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, - 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, - 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, - 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, - 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, - 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, - 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, - 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, - 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, - 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, - 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, - 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, - 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, - 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, - 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, - 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, - 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, - 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, - 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, - 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, - 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, - 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, - 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, - 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, - 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 
0xbf, 0x89, 0x84, - 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, - 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, - 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, - 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, - 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, - 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, - 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, - 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, - 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, - 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, - 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, - 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, - 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, - 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, - 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, - 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, - 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, - 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, - 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, - 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, - 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, - 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, - 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, - 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, - 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, - 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, - 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, - 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, - 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, - 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 
0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 04bc0ed6..f90f93f9 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -92,12 +92,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -146,8 +146,11 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 6a30e677..53a53a3a 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v2beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,8 +38,11 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -59,7 +62,7 @@ type HorizontalPodAutoscalerSpec struct { // MetricSourceType indicates the type of metric. type MetricSourceType string -var ( +const ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -228,7 +231,7 @@ type HorizontalPodAutoscalerStatus struct { // a HorizontalPodAutoscaler. type HorizontalPodAutoscalerConditionType string -var ( +const ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" @@ -377,12 +380,12 @@ type ExternalMetricStatus struct { type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 411b817d..a0d5f533 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
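One non-mechanical change in types.go above is that the well-known MetricSourceType and HorizontalPodAutoscalerConditionType values move from a var block to a const block. The sketch below restates that pattern outside the vendored file; the package layout and main function are illustrative, only the typed-string const shape comes from the patch.

package main

import "fmt"

// MetricSourceType restates the typed-string pattern used by the vendored API types.
type MetricSourceType string

// Declaring the well-known values in a const block (as the patch does) makes them
// immutable and usable in constant expressions; in a var block they could be
// reassigned at runtime.
const (
	ObjectMetricSourceType   MetricSourceType = "Object"
	PodsMetricSourceType     MetricSourceType = "Pods"
	ResourceMetricSourceType MetricSourceType = "Resource"
	ExternalMetricSourceType MetricSourceType = "External"
)

func main() {
	t := ObjectMetricSourceType
	fmt.Println(t == "Object") // true: typed strings still compare against untyped literals
}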
var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond.", } @@ -197,8 +197,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. 
CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -208,8 +208,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index 2ec7e615..c51e05b8 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go index 7c7d2b6f..6d275f6d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta2 // import "k8s.io/api/autoscaling/v2beta2" diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index be752a14..23bc5b98 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -14,52 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto -// DO NOT EDIT! -/* - Package v2beta2 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricIdentifier - MetricSpec - MetricStatus - MetricTarget - MetricValueStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ package v2beta2 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
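A small but behavioral change above is in zz_generated.deepcopy.go: HorizontalPodAutoscalerList now copies its ListMeta with in.ListMeta.DeepCopyInto(&out.ListMeta) instead of plain assignment, presumably because the vendored metav1.ListMeta now carries pointer-valued fields that a struct assignment would alias. The sketch below uses a toy stand-in type, not the real metav1.ListMeta, to show the aliasing the generated code is guarding against.

package main

import "fmt"

// toyMeta is a stand-in for a list-metadata struct that carries a pointer field;
// it is not the real metav1.ListMeta.
type toyMeta struct {
	ResourceVersion    string
	RemainingItemCount *int64
}

// DeepCopyInto copies the value fields and re-allocates the pointer field so the
// copy does not alias the original, matching the deepcopy-gen pattern.
func (in *toyMeta) DeepCopyInto(out *toyMeta) {
	*out = *in
	if in.RemainingItemCount != nil {
		v := *in.RemainingItemCount
		out.RemainingItemCount = &v
	}
}

func main() {
	n := int64(5)
	src := toyMeta{ResourceVersion: "42", RemainingItemCount: &n}

	shallow := src // plain assignment: shallow.RemainingItemCount still points at n
	var deep toyMeta
	src.DeepCopyInto(&deep)

	n = 99
	fmt.Println(*shallow.RemainingItemCount, *deep.RemainingItemCount) // 99 5
}

After the original value changes, the shallow copy observes 99 while the deep copy still holds 5, which is exactly the shared state the switch to DeepCopyInto avoids.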
var _ = proto.Marshal @@ -75,88 +50,534 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_592ad94d7d6be24f, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} 
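The regenerated v2beta2 file also replaces the old forward-writing MarshalTo bodies with XXX_Marshal and MarshalToSizedBuffer, which size the message first and then write fields from the end of the buffer toward the front (visible further below, where CrossVersionObjectReference writes APIVersion, then Name, then Kind). One benefit visible in the diff is that a nested message's length prefix can be derived from how far the index moved, instead of calling Size() again per child. The sketch below is an illustrative reduction, not the generated code; marshalStringField and the two tag bytes are assumptions chosen to match a message with two string fields.

package main

import "fmt"

// sov reports how many bytes the varint form of x occupies.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// encodeVarint mirrors the generated helper: it writes v's varint form ending
// just before offset and returns the new, smaller offset.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// marshalStringField shows the MarshalToSizedBuffer style: the payload, its
// length prefix and the tag are written back to front into a pre-sized buffer.
func marshalStringField(dAtA []byte, i int, tag byte, s string) int {
	i -= len(s)
	copy(dAtA[i:], s)
	i = encodeVarint(dAtA, i, uint64(len(s)))
	i--
	dAtA[i] = tag
	return i
}

func main() {
	kind, name := "Deployment", "web" // two string fields, tags 0x0a and 0x12
	size := (1 + 1 + len(kind)) + (1 + 1 + len(name))
	buf := make([]byte, size)
	i := len(buf)
	i = marshalStringField(buf, i, 0x12, name) // highest-numbered field first
	i = marshalStringField(buf, i, 0x0a, kind)
	fmt.Printf("% x\n", buf[i:]) // 0a 0a 44 65 ... 12 03 77 65 62
}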
+func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_592ad94d7d6be24f, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_592ad94d7d6be24f, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_592ad94d7d6be24f, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_592ad94d7d6be24f, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) } -func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } -func (*MetricIdentifier) ProtoMessage() {} -func (*MetricIdentifier) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } +func (*MetricIdentifier) ProtoMessage() {} +func (*MetricIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{8} +} +func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricIdentifier.Merge(m, src) +} +func (m *MetricIdentifier) XXX_Size() int { + return m.Size() +} +func (m *MetricIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_MetricIdentifier.DiscardUnknown(m) +} -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo -func (m *MetricTarget) Reset() { *m = MetricTarget{} } -func (*MetricTarget) ProtoMessage() {} -func (*MetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{9} +} +func (m 
*MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} -func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } -func (*MetricValueStatus) ProtoMessage() {} -func (*MetricValueStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{10} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *MetricTarget) Reset() { *m = MetricTarget{} } +func (*MetricTarget) ProtoMessage() {} +func (*MetricTarget) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{11} +} +func (m *MetricTarget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricTarget) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTarget.Merge(m, src) +} +func (m *MetricTarget) XXX_Size() int { + return m.Size() +} +func (m *MetricTarget) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTarget.DiscardUnknown(m) +} -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_MetricTarget proto.InternalMessageInfo -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *MetricValueStatus) Reset() { *m = 
MetricValueStatus{} } +func (*MetricValueStatus) ProtoMessage() {} +func (*MetricValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{12} +} +func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricValueStatus.Merge(m, src) +} +func (m *MetricValueStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricValueStatus.DiscardUnknown(m) +} -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{13} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{14} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{15} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) 
XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{16} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{17} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{18} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") @@ -179,10 +600,109 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_592ad94d7d6be24f) +} + +var fileDescriptor_592ad94d7d6be24f = []byte{ + // 1425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 
0xc5, + 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, + 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, + 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, + 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, + 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, + 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, + 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, + 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, + 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, + 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, + 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, + 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, + 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, + 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, + 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, + 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, + 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, + 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, + 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, + 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, + 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, + 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, + 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, + 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, + 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, + 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, + 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, + 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, + 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, + 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, + 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, + 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, + 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, + 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, + 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, + 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, + 0xc0, 0x1b, 0xb1, 
0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, + 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, + 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, + 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, + 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, + 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, + 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, + 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, + 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, + 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, + 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, + 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, + 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, + 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, + 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, + 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, + 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, + 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, + 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, + 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, + 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, + 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, + 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, + 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, + 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, + 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, + 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, + 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, + 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, + 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, + 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, + 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, + 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, + 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, + 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, + 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, + 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 
0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, + 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, + 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, + 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, + 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, + 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, + 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, + 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, + 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, + 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, + 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, + 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, + 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, + 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, + 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, + 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, + 0x00, +} + func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -190,29 +710,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -220,33 +748,42 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n1, err := 
m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n2, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -254,33 +791,42 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n3, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n4, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -288,41 +834,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -330,41 +887,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -372,37 +940,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -410,45 +987,54 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n10, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - i += n10 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -456,62 +1042,73 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n11, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n11 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for 
iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -519,31 +1116,39 @@ func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { } func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n12, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -551,61 +1156,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n13, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- + dAtA[i] = 0x2a } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n14, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Resource != nil { + { + size, err 
:= m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - } - if m.Resource != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n15, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n16, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -613,61 +1232,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n17, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n18, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n18 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n19, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n20, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricTarget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -675,46 +1308,56 @@ func (m *MetricTarget) Marshal() (dAtA []byte, err error) { } func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n21, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x20 } if m.AverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n22, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x1a } - if m.AverageUtilization != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -722,42 +1365,51 @@ func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricValueStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Value != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n23, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x18 } if m.AverageValue != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n24, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- + dAtA[i] = 0x12 } - if m.AverageUtilization != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -765,41 +1417,52 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n25, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n27, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n27 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -807,41 +1470,52 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n28, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n29, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n30, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err 
error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -849,33 +1523,42 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n31, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n31 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n32, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -883,33 +1566,42 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n33, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n33 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n34, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -917,29 +1609,37 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n35, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -947,53 +1647,48 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n36, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n36 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -1006,6 +1701,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1016,6 +1714,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1026,6 +1727,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1038,6 +1742,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1054,6 +1761,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1068,6 +1778,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + 
return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1086,6 +1799,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1113,6 +1829,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricIdentifier) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1125,6 +1844,9 @@ func (m *MetricIdentifier) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1149,6 +1871,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1173,6 +1898,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *MetricTarget) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1192,6 +1920,9 @@ func (m *MetricTarget) Size() (n int) { } func (m *MetricValueStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Value != nil { @@ -1209,6 +1940,9 @@ func (m *MetricValueStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.DescribedObject.Size() @@ -1221,6 +1955,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1233,6 +1970,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1243,6 +1983,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1253,6 +1996,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1263,6 +2009,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1273,14 +2022,7 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1324,7 +2066,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1338,7 +2080,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) 
+ `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1349,9 +2091,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1360,11 +2107,16 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, `}`, }, "") return s @@ -1373,13 +2125,23 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1390,7 +2152,7 @@ func (this *MetricIdentifier) String() string { } s := strings.Join([]string{`&MetricIdentifier{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1401,10 +2163,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1415,10 +2177,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1429,8 +2191,8 @@ func (this *MetricTarget) String() string { } s := strings.Join([]string{`&MetricTarget{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), 
"Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -1441,8 +2203,8 @@ func (this *MetricValueStatus) String() string { return "nil" } s := strings.Join([]string{`&MetricValueStatus{`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -1539,7 +2301,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1567,7 +2329,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1577,6 +2339,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1596,7 +2361,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1606,6 +2371,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1625,7 +2393,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,6 +2403,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1646,7 +2417,10 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1676,7 +2450,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1704,7 +2478,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1713,6 +2487,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} if postIndex > l { return io.ErrUnexpectedEOF } @@ -1734,7 +2511,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1743,6 +2520,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1759,6 +2539,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1786,7 +2569,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1814,7 +2597,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1823,6 +2606,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1844,7 +2630,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1853,6 +2639,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1869,6 +2658,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1896,7 +2688,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1924,7 +2716,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1933,6 +2725,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1954,7 +2749,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1963,6 +2758,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1984,7 +2782,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1993,6 +2791,9 @@ func (m *HorizontalPodAutoscaler) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2009,6 +2810,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2036,7 +2840,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2064,7 +2868,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2074,6 +2878,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2093,7 +2900,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2103,6 +2910,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2122,7 +2932,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2131,6 +2941,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2152,7 +2965,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2162,6 +2975,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2181,7 +2997,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2191,6 +3007,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2205,6 +3024,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2232,7 +3054,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire 
|= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2260,7 +3082,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2269,6 +3091,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2290,7 +3115,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2299,6 +3124,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2316,6 +3144,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2343,7 +3174,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2371,7 +3202,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2380,6 +3211,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2401,7 +3235,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2421,7 +3255,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2440,7 +3274,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2449,6 +3283,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2466,6 +3303,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2493,7 +3333,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2521,7 +3361,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 
0x80 { break } @@ -2541,7 +3381,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2550,11 +3390,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2574,7 +3417,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2593,7 +3436,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2612,7 +3455,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2621,6 +3464,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2643,7 +3489,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2652,6 +3498,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2669,6 +3518,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2696,7 +3548,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2724,7 +3576,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2734,6 +3586,9 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2753,7 +3608,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2762,11 +3617,14 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector 
= &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2781,6 +3639,9 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2808,7 +3669,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2836,7 +3697,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2846,6 +3707,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2865,7 +3729,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2874,6 +3738,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2898,7 +3765,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2907,6 +3774,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2931,7 +3801,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2940,6 +3810,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2964,7 +3837,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2973,6 +3846,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2992,6 +3868,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3019,7 +3898,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3047,7 +3926,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3057,6 +3936,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3076,7 +3958,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3085,6 +3967,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3109,7 +3994,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3118,6 +4003,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3142,7 +4030,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3151,6 +4039,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3175,7 +4066,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3184,6 +4075,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3203,6 +4097,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3230,7 +4127,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3258,7 +4155,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3268,6 +4165,9 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3287,7 +4187,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3296,11 +4196,14 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.Value = &resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3320,7 +4223,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3329,11 +4232,14 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3353,7 +4259,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3368,6 +4274,9 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3395,7 +4304,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3423,7 +4332,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3432,11 +4341,14 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.Value = &resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3456,7 +4368,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3465,11 +4377,14 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3489,7 +4404,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3504,6 +4419,9 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3531,7 +4449,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3559,7 +4477,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3568,6 +4486,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3589,7 +4510,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3598,6 +4519,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3619,7 +4543,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3628,6 +4552,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3644,6 +4571,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3671,7 +4601,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3699,7 +4629,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3708,6 +4638,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3729,7 +4662,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3738,6 +4671,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3759,7 +4695,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3768,6 +4704,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3784,6 +4723,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3811,7 +4753,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3839,7 +4781,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3848,6 +4790,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3869,7 +4814,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3878,6 +4823,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3894,6 +4842,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3921,7 +4872,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3949,7 +4900,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3958,6 +4909,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3979,7 +4933,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3988,6 +4942,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4004,6 +4961,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4031,7 +4991,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4059,7 +5019,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4069,6 +5029,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4088,7 +5051,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4097,6 +5060,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4113,6 +5079,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4140,7 +5109,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4168,7 +5137,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4178,6 +5147,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4197,7 +5169,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4206,6 +5178,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4222,6 +5197,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4288,10 +5266,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4320,6 +5301,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4338,101 +5322,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5, - 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, - 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, - 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, - 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, - 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, - 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, - 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, - 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, - 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, - 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, - 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, - 0x47, 
0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, - 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, - 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, - 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, - 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, - 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, - 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, - 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, - 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, - 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, - 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, - 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, - 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, - 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, - 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, - 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, - 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, - 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, - 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, - 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, - 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, - 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, - 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, - 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, - 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, - 0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, - 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, - 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, - 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, - 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, - 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, - 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, - 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, - 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, - 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, - 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, - 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 
0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, - 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, - 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, - 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, - 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, - 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, - 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, - 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, - 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, - 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, - 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, - 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, - 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, - 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, - 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, - 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, - 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, - 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, - 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, - 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, - 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, - 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, - 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, - 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, - 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, - 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, - 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, - 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, - 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, - 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, - 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, - 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, - 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, - 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, - 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, - 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, - 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 
0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, - 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, - 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, - 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, - 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index b4e4c95a..80f1d345 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta2"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -69,12 +69,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -123,8 +123,11 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 2d337953..4480c7da 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -19,7 +19,7 @@ limitations under the License. package v2beta2 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,12 +33,12 @@ import ( type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -52,8 +52,11 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -73,7 +76,7 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -117,7 +120,7 @@ type MetricSpec struct { // MetricSourceType indicates the type of metric. type MetricSourceType string -var ( +const ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -218,7 +221,7 @@ type MetricTarget struct { // "Value", "AverageValue", or "Utilization" type MetricTargetType string -var ( +const ( // UtilizationMetricType declares a MetricTarget is an AverageUtilization value UtilizationMetricType MetricTargetType = "Utilization" // ValueMetricType declares a MetricTarget is a raw value @@ -259,7 +262,7 @@ type HorizontalPodAutoscalerStatus struct { // a HorizontalPodAutoscaler. type HorizontalPodAutoscalerConditionType string -var ( +const ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. 
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 996dc184..bb85b9f0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -60,8 +60,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -95,7 +95,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. 
Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index a6a95653..2dffa333 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -126,7 +126,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index 04491807..c4a8db6e 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 097a6ff2..fb9d21e1 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -14,36 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto - - It has these top-level messages: - Job - JobCondition - JobList - JobSpec - JobStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -56,25 +45,145 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Job) Reset() { *m = Job{} } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Job) Reset() { *m = Job{} } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{0} +} +func (m *Job) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Job) XXX_Merge(src proto.Message) { + xxx_messageInfo_Job.Merge(m, src) +} +func (m *Job) XXX_Size() int { + return m.Size() +} +func (m *Job) XXX_DiscardUnknown() { + xxx_messageInfo_Job.DiscardUnknown(m) +} + +var xxx_messageInfo_Job proto.InternalMessageInfo + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (*JobCondition) ProtoMessage() {} +func (*JobCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{1} +} +func (m *JobCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobCondition.Merge(m, src) +} +func (m *JobCondition) XXX_Size() int { + return m.Size() +} +func (m *JobCondition) XXX_DiscardUnknown() { + xxx_messageInfo_JobCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_JobCondition proto.InternalMessageInfo + +func (m *JobList) Reset() { *m = JobList{} } +func (*JobList) ProtoMessage() {} +func (*JobList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{2} +} +func (m *JobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobList.Merge(m, src) +} +func (m *JobList) XXX_Size() int { + return m.Size() +} +func (m *JobList) XXX_DiscardUnknown() { + xxx_messageInfo_JobList.DiscardUnknown(m) +} + +var xxx_messageInfo_JobList proto.InternalMessageInfo -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (*JobCondition) ProtoMessage() {} -func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (*JobSpec) ProtoMessage() {} +func (*JobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{3} +} +func (m *JobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobSpec.Merge(m, src) +} +func (m *JobSpec) XXX_Size() int { + return m.Size() +} +func (m *JobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobSpec.DiscardUnknown(m) +} -func (m *JobList) Reset() { *m = JobList{} } -func (*JobList) ProtoMessage() {} -func (*JobList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_JobSpec proto.InternalMessageInfo -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (*JobSpec) ProtoMessage() {} -func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{4} +} +func (m *JobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobStatus.Merge(m, src) +} +func (m *JobStatus) XXX_Size() int { + return m.Size() +} +func (m *JobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_JobStatus.DiscardUnknown(m) +} -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (*JobStatus) ProtoMessage() {} -func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_JobStatus proto.InternalMessageInfo func init() { proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") @@ -83,10 +192,78 @@ func init() { proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptor_3b52da57c93de713) +} + +var fileDescriptor_3b52da57c93de713 = []byte{ + // 929 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, + 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, + 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, + 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, + 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xcf, + 0x3d, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, + 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, + 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, + 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, + 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, + 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, + 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, + 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, + 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, + 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x0b, 0x95, 0x96, 0x66, 0x3c, 0x19, 0x49, + 0x80, 0x9b, 0x92, 0x6d, 0xce, 0x8e, 0xcd, 0x67, 0xce, 0xcf, 0xe0, 0x8a, 0x4b, 0x10, 0xd4, 0xc6, + 0x37, 0xa9, 0xb1, 0x96, 0xa5, 0x06, 0xaa, 0x30, 0x52, 0xba, 0xe2, 0xaf, 0xd0, 0x06, 0x8f, 0xc1, + 0xed, 0xac, 0x2b, 0xf7, 0x77, 0xcc, 0x25, 0xef, 0x6f, 0x5e, 0x30, 0xa7, 0x1f, 0x83, 
0x6b, 0xef, + 0x14, 0x4e, 0x1b, 0xf2, 0x44, 0x94, 0x0e, 0x9f, 0xa3, 0x4d, 0x2e, 0xa8, 0x98, 0xf2, 0x4e, 0x43, + 0x39, 0xe8, 0x2b, 0x1d, 0x14, 0xcb, 0xde, 0x2b, 0x3c, 0x36, 0xf3, 0x33, 0x29, 0xd4, 0xdd, 0x3f, + 0x1b, 0x68, 0xe7, 0x82, 0x39, 0x67, 0x2c, 0xf2, 0x7c, 0xe1, 0xb3, 0x08, 0x9f, 0xa2, 0x0d, 0x71, + 0x1d, 0x83, 0xfa, 0xec, 0xb6, 0xfd, 0x64, 0x5e, 0x7a, 0x70, 0x1d, 0xc3, 0xab, 0xd4, 0xd8, 0xaf, + 0x73, 0x25, 0x46, 0x14, 0x1b, 0xf7, 0xca, 0x76, 0xd6, 0x95, 0xee, 0x74, 0xb1, 0xdc, 0xab, 0xd4, + 0x58, 0x92, 0x0e, 0xb3, 0x74, 0x5a, 0x6c, 0x0a, 0x8f, 0xd0, 0x6e, 0x40, 0xb9, 0xb8, 0x4a, 0x98, + 0x03, 0x03, 0x3f, 0x84, 0xe2, 0x1b, 0x3f, 0x7c, 0xd8, 0x0c, 0xa4, 0xc2, 0x3e, 0x28, 0x1a, 0xd8, + 0xed, 0xd5, 0x8d, 0xc8, 0xa2, 0x2f, 0x9e, 0x21, 0x2c, 0x81, 0x41, 0x42, 0x23, 0x9e, 0x7f, 0x92, + 0xac, 0xb6, 0xf1, 0xda, 0xd5, 0x0e, 0x8b, 0x6a, 0xb8, 0x77, 0xcf, 0x8d, 0x2c, 0xa9, 0x80, 0xdf, + 0x47, 0x9b, 0x09, 0x50, 0xce, 0xa2, 0x4e, 0x53, 0x3d, 0x57, 0x39, 0x1d, 0xa2, 0x50, 0x52, 0xdc, + 0xe2, 0x0f, 0xd0, 0x56, 0x08, 0x9c, 0xd3, 0x11, 0x74, 0x36, 0x15, 0xf1, 0x51, 0x41, 0xdc, 0xba, + 0xcc, 0x61, 0x32, 0xbf, 0xef, 0xfe, 0xae, 0xa1, 0xad, 0x0b, 0xe6, 0xf4, 0x7c, 0x2e, 0xf0, 0x0f, + 0xf7, 0xe2, 0x6b, 0x3e, 0xec, 0x63, 0xa4, 0x5a, 0x85, 0x77, 0xbf, 0xa8, 0xd3, 0x9a, 0x23, 0xb5, + 0xe8, 0x7e, 0x89, 0x9a, 0xbe, 0x80, 0x50, 0x8e, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, + 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, + 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, + 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x1f, 0x33, + 0x8f, 0x5b, 0x21, 0x39, 0xab, 0x60, 0x52, 0xe7, 0xe0, 0x67, 0xe8, 0x80, 0xba, 0xc2, 0x9f, 0xc1, + 0xd7, 0x40, 0xbd, 0xc0, 0x8f, 0xa0, 0x0f, 0x2e, 0x8b, 0xbc, 0xfc, 0xaf, 0xd3, 0xb0, 0xdf, 0xca, + 0x52, 0xe3, 0xe0, 0xe9, 0x32, 0x02, 0x59, 0xae, 0xc3, 0xa7, 0x68, 0xc7, 0xa1, 0xee, 0x84, 0x0d, + 0x87, 0x3d, 0x3f, 0xf4, 0x45, 0x67, 0x4b, 0x35, 0xb1, 0x9f, 0xa5, 0xc6, 0x8e, 0x5d, 0xc3, 0xc9, + 0x02, 0x0b, 0xff, 0x88, 0x5a, 0x1c, 0x02, 0x70, 0x05, 0x4b, 0x8a, 0x88, 0x7d, 0xf2, 0xc0, 0xa9, + 0x50, 0x07, 0x82, 0x7e, 0x21, 0xb5, 0x77, 0xe4, 0x58, 0xe6, 0x27, 0x52, 0x5a, 0xe2, 0xcf, 0xd1, + 0x5e, 0x48, 0xa3, 0x29, 0x2d, 0x99, 0x2a, 0x5b, 0x2d, 0x1b, 0x67, 0xa9, 0xb1, 0x77, 0xb9, 0x70, + 0x43, 0xee, 0x30, 0xf1, 0xb7, 0xa8, 0x25, 0x20, 0x8c, 0x03, 0x2a, 0xf2, 0xa0, 0x6d, 0x9f, 0xbc, + 0x57, 0x9f, 0xaa, 0xfc, 0xbf, 0xca, 0x46, 0xae, 0x98, 0x37, 0x28, 0x68, 0x6a, 0x31, 0x95, 0x29, + 0x99, 0xa3, 0xa4, 0xb4, 0xc1, 0xcf, 0xd1, 0x63, 0x21, 0x82, 0xe2, 0xc5, 0x9e, 0x0e, 0x05, 0x24, + 0xe7, 0x7e, 0xe4, 0xf3, 0x31, 0x78, 0x9d, 0x96, 0x7a, 0xae, 0xb7, 0xb3, 0xd4, 0x78, 0x3c, 0x18, + 0xf4, 0x96, 0x51, 0xc8, 0x2a, 0x6d, 0xf7, 0xb7, 0x06, 0x6a, 0x97, 0x5b, 0x0d, 0x3f, 0x47, 0xc8, + 0x9d, 0xef, 0x10, 0xde, 0xd1, 0x54, 0x1e, 0xdf, 0x5d, 0x95, 0xc7, 0x72, 0xdb, 0x54, 0xab, 0xb9, + 0x84, 0x38, 0xa9, 0x19, 0xe1, 0xef, 0x50, 0x9b, 0x0b, 0x9a, 0x08, 0xb5, 0x0d, 0xd6, 0x5f, 0x7b, + 0x1b, 0xec, 0x66, 0xa9, 0xd1, 0xee, 0xcf, 0x0d, 0x48, 0xe5, 0x85, 0x87, 0x68, 0xaf, 0x0a, 0xe6, + 0xff, 0xdc, 0x6c, 0x6a, 0x9e, 0x67, 0x0b, 0x2e, 0xe4, 0x8e, 0xab, 0xdc, 0x2f, 0x79, 0x72, 0x55, + 0xd0, 0x9a, 0xd5, 0x7e, 0xc9, 0x63, 0x4e, 0x8a, 0x5b, 0x6c, 0xa1, 0x36, 0x9f, 0xba, 0x2e, 0x80, + 0x07, 0x9e, 0x8a, 0x4b, 0xd3, 0x7e, 0xa3, 0xa0, 0xb6, 0xfb, 0xf3, 0x0b, 0x52, 0x71, 0xa4, 0xf1, + 0x90, 0xfa, 0x01, 0x78, 0x2a, 0x26, 0x35, 0xe3, 0x73, 0x85, 0x92, 0xe2, 0xd6, 0x3e, 0xba, 0xb9, + 0xd5, 0xd7, 
0x5e, 0xdc, 0xea, 0x6b, 0x2f, 0x6f, 0xf5, 0xb5, 0x5f, 0x32, 0x5d, 0xbb, 0xc9, 0x74, + 0xed, 0x45, 0xa6, 0x6b, 0x2f, 0x33, 0x5d, 0xfb, 0x2b, 0xd3, 0xb5, 0x5f, 0xff, 0xd6, 0xd7, 0xbe, + 0x5f, 0x9f, 0x1d, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x73, 0xe7, 0x7a, 0xb8, 0x08, 0x00, + 0x00, +} + func (m *Job) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -94,41 +271,52 @@ func (m *Job) Marshal() (dAtA []byte, err error) { } func (m *Job) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -136,49 +324,62 @@ func (m *JobCondition) Marshal() (dAtA []byte, err error) { } func (m *JobCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -186,37 +387,46 @@ func (m *JobList) Marshal() (dAtA []byte, err error) { } func (m *JobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -224,70 +434,79 @@ func (m *JobSpec) Marshal() (dAtA []byte, err error) { } func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Parallelism != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + if m.TTLSecondsAfterFinished != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + i-- + dAtA[i] = 0x40 } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + if m.BackoffLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + i-- + dAtA[i] = 0x38 } - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n7 + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 if m.ManualSelector != nil { - dAtA[i] = 0x28 - i++ + i-- if *m.ManualSelector { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x28 } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n8 - if m.BackoffLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x18 } - if m.TTLSecondsAfterFinished != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + if m.Completions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + i-- + dAtA[i] = 0x10 + } + if m.Parallelism != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *JobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -295,82 +514,80 @@ func (m *JobStatus) Marshal() (dAtA []byte, err error) { } func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) + i-- + dAtA[i] = 0x20 + if m.CompletionTime != nil { + { + size, err := m.CompletionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } if m.StartTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n9, err := m.StartTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0x12 } - if m.CompletionTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CompletionTime.Size())) - n10, err := m.CompletionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n10 } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) - dAtA[i] = 0x30 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Failed)) - return i, nil + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Job) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -383,6 +600,9 @@ func (m *Job) Size() (n int) { } func (m *JobCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -401,6 +621,9 @@ func (m *JobCondition) Size() (n int) { } func (m *JobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -415,6 +638,9 @@ func (m *JobList) Size() (n int) { } func (m *JobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Parallelism != nil { @@ -445,6 +671,9 @@ func (m *JobSpec) Size() (n int) { } func (m *JobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -468,14 +697,7 @@ func (m *JobStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -485,7 +707,7 @@ func (this *Job) String() string { return "nil" } s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -499,8 +721,8 @@ func (this *JobCondition) String() string { s := strings.Join([]string{`&JobCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -511,9 +733,14 @@ 
func (this *JobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Job{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Job", "Job", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -526,9 +753,9 @@ func (this *JobSpec) String() string { `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, `Completions:` + valueToStringGenerated(this.Completions) + `,`, `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, `}`, @@ -539,10 +766,15 @@ func (this *JobStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]JobCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "JobCondition", "JobCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&JobStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "v1.Time", 1) + `,`, `Active:` + fmt.Sprintf("%v", this.Active) + `,`, `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, @@ -573,7 +805,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -601,7 +833,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -610,6 +842,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -631,7 +866,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -640,6 +875,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -661,7 +899,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -670,6 +908,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -686,6 +927,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -713,7 +957,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -741,7 +985,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -751,6 +995,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -770,7 +1017,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -780,6 +1027,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -799,7 +1049,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -808,6 +1058,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -829,7 +1082,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -838,6 +1091,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -859,7 +1115,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -869,6 +1125,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -888,7 +1147,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -898,6 +1157,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -912,6 +1174,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -939,7 +1204,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -967,7 +1232,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -976,6 +1241,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -997,7 +1265,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1006,6 +1274,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1023,6 +1294,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1050,7 +1324,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1078,7 +1352,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1098,7 +1372,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1118,7 +1392,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1138,7 +1412,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1147,11 +1421,14 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1171,7 +1448,7 @@ func (m *JobSpec) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1192,7 +1469,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1201,6 +1478,9 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1222,7 +1502,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1242,7 +1522,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1257,6 +1537,9 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1284,7 +1567,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1312,7 +1595,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1321,6 +1604,9 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1343,7 +1629,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1352,11 +1638,14 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.StartTime = &v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1376,7 +1665,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1385,11 +1674,14 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.CompletionTime = &v1.Time{} } if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1409,7 +1701,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift + m.Active |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1428,7 +1720,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift + m.Succeeded |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1447,7 +1739,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift + m.Failed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1461,6 +1753,9 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1527,10 +1822,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1559,6 +1857,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1577,70 +1878,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 929 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, - 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, - 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, - 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, - 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, - 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xdf, - 0x39, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, - 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, - 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, - 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, - 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, - 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, - 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, - 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, - 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, - 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x84, 0x4a, 0x4b, 0x33, 0x9e, 0x8c, 0x24, - 0xc0, 0x4d, 0xc9, 0x36, 0x67, 0xc7, 0xe6, 0x33, 0xe7, 0x67, 0x70, 0xc5, 0x25, 0x08, 0x6a, 0xe3, - 0x9b, 0xd4, 0x58, 0xcb, 0x52, 0x03, 0x55, 0x18, 0x29, 0x5d, 0xf1, 0x57, 0x68, 0x83, 0xc7, 0xe0, - 0x76, 0xd6, 0x95, 0xfb, 0x3b, 0xe6, 0x92, 0xf9, 0x9b, 0x17, 0xcc, 0xe9, 0xc7, 0xe0, 0xda, 0x3b, - 0x85, 0xd3, 0x86, 0x3c, 0x11, 0xa5, 0xc3, 0xe7, 0x68, 0x93, 0x0b, 0x2a, 0xa6, 0xbc, 0xd3, 0x50, - 0x0e, 0xfa, 0x4a, 0x07, 0xc5, 0xb2, 0xf7, 0x0a, 0x8f, 0xcd, 0xfc, 0x4c, 0x0a, 0x75, 0xf7, 0xcf, - 0x06, 0xda, 0xb9, 0x60, 0xce, 0x19, 0x8b, 0x3c, 0x5f, 0xf8, 0x2c, 0xc2, 0xa7, 0x68, 0x43, 0x5c, - 0xc7, 0xa0, 0x3e, 0xbb, 0x6d, 0x3f, 0x99, 0x97, 0x1e, 0x5c, 0xc7, 0xf0, 0x2a, 0x35, 0xf6, 0xeb, - 0x5c, 0x89, 0x11, 0xc5, 0xc6, 0xbd, 0xb2, 
0x9d, 0x75, 0xa5, 0x3b, 0x5d, 0x2c, 0xf7, 0x2a, 0x35, - 0x96, 0xa4, 0xc3, 0x2c, 0x9d, 0x16, 0x9b, 0xc2, 0x23, 0xb4, 0x1b, 0x50, 0x2e, 0xae, 0x12, 0xe6, - 0xc0, 0xc0, 0x0f, 0xa1, 0xf8, 0xc6, 0x0f, 0x1f, 0xf6, 0x06, 0x52, 0x61, 0x1f, 0x14, 0x0d, 0xec, - 0xf6, 0xea, 0x46, 0x64, 0xd1, 0x17, 0xcf, 0x10, 0x96, 0xc0, 0x20, 0xa1, 0x11, 0xcf, 0x3f, 0x49, - 0x56, 0xdb, 0x78, 0xed, 0x6a, 0x87, 0x45, 0x35, 0xdc, 0xbb, 0xe7, 0x46, 0x96, 0x54, 0xc0, 0xef, - 0xa3, 0xcd, 0x04, 0x28, 0x67, 0x51, 0xa7, 0xa9, 0xc6, 0x55, 0xbe, 0x0e, 0x51, 0x28, 0x29, 0x6e, - 0xf1, 0x07, 0x68, 0x2b, 0x04, 0xce, 0xe9, 0x08, 0x3a, 0x9b, 0x8a, 0xf8, 0xa8, 0x20, 0x6e, 0x5d, - 0xe6, 0x30, 0x99, 0xdf, 0x77, 0x7f, 0xd7, 0xd0, 0xd6, 0x05, 0x73, 0x7a, 0x3e, 0x17, 0xf8, 0x87, - 0x7b, 0xf1, 0x35, 0x1f, 0xf6, 0x31, 0x52, 0xad, 0xc2, 0xbb, 0x5f, 0xd4, 0x69, 0xcd, 0x91, 0x5a, - 0x74, 0xbf, 0x44, 0x4d, 0x5f, 0x40, 0x28, 0x9f, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, - 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, - 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, - 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x87, 0x99, - 0xc7, 0xad, 0x90, 0x9c, 0x55, 0x30, 0xa9, 0x73, 0xf0, 0x33, 0x74, 0x40, 0x5d, 0xe1, 0xcf, 0xe0, - 0x6b, 0xa0, 0x5e, 0xe0, 0x47, 0xd0, 0x07, 0x97, 0x45, 0x5e, 0xfe, 0xd7, 0x69, 0xd8, 0x6f, 0x65, - 0xa9, 0x71, 0xf0, 0x74, 0x19, 0x81, 0x2c, 0xd7, 0xe1, 0x1f, 0x51, 0x8b, 0x43, 0x00, 0xae, 0x60, - 0x49, 0x11, 0x96, 0x4f, 0x1e, 0x38, 0x5f, 0xea, 0x40, 0xd0, 0x2f, 0xa4, 0xf6, 0x8e, 0x1c, 0xf0, - 0xfc, 0x44, 0x4a, 0x4b, 0xfc, 0x39, 0xda, 0x0b, 0x69, 0x34, 0xa5, 0x25, 0x53, 0xa5, 0xa4, 0x65, - 0xe3, 0x2c, 0x35, 0xf6, 0x2e, 0x17, 0x6e, 0xc8, 0x1d, 0x26, 0xfe, 0x16, 0xb5, 0x04, 0x84, 0x71, - 0x40, 0x45, 0x1e, 0x99, 0xed, 0x93, 0xf7, 0xea, 0xef, 0x23, 0xff, 0x79, 0xb2, 0x91, 0x2b, 0xe6, - 0x0d, 0x0a, 0x9a, 0x5a, 0x31, 0xe5, 0x7b, 0xcf, 0x51, 0x52, 0xda, 0xe0, 0x53, 0xb4, 0xe3, 0x50, - 0x77, 0xc2, 0x86, 0xc3, 0x9e, 0x1f, 0xfa, 0xa2, 0xb3, 0xa5, 0x46, 0xbe, 0x9f, 0xa5, 0xc6, 0x8e, - 0x5d, 0xc3, 0xc9, 0x02, 0x0b, 0x3f, 0x47, 0x8f, 0x85, 0x08, 0x8a, 0x89, 0x3d, 0x1d, 0x0a, 0x48, - 0xce, 0xfd, 0xc8, 0xe7, 0x63, 0xf0, 0x3a, 0x2d, 0x65, 0xf0, 0x76, 0x96, 0x1a, 0x8f, 0x07, 0x83, - 0xde, 0x32, 0x0a, 0x59, 0xa5, 0xed, 0xfe, 0xd6, 0x40, 0xed, 0x72, 0xab, 0xe1, 0xe7, 0x08, 0xb9, - 0xf3, 0x1d, 0xc2, 0x3b, 0x9a, 0xca, 0xe3, 0xbb, 0xab, 0xf2, 0x58, 0x6e, 0x9b, 0x6a, 0x35, 0x97, - 0x10, 0x27, 0x35, 0x23, 0xfc, 0x1d, 0x6a, 0x73, 0x41, 0x13, 0xa1, 0xb6, 0xc1, 0xfa, 0x6b, 0x6f, - 0x83, 0xdd, 0x2c, 0x35, 0xda, 0xfd, 0xb9, 0x01, 0xa9, 0xbc, 0xf0, 0x10, 0xed, 0x55, 0xc1, 0xfc, - 0x9f, 0x9b, 0x4d, 0xa5, 0xe0, 0x6c, 0xc1, 0x85, 0xdc, 0x71, 0x95, 0xfb, 0x25, 0x4f, 0xae, 0x8a, - 0x67, 0xb3, 0xda, 0x2f, 0x79, 0xcc, 0x49, 0x71, 0x8b, 0x2d, 0xd4, 0xe6, 0x53, 0xd7, 0x05, 0xf0, - 0xc0, 0x53, 0x21, 0x6b, 0xda, 0x6f, 0x14, 0xd4, 0x76, 0x7f, 0x7e, 0x41, 0x2a, 0x8e, 0x34, 0x1e, - 0x52, 0x3f, 0x00, 0x4f, 0x85, 0xab, 0x66, 0x7c, 0xae, 0x50, 0x52, 0xdc, 0xda, 0x47, 0x37, 0xb7, - 0xfa, 0xda, 0x8b, 0x5b, 0x7d, 0xed, 0xe5, 0xad, 0xbe, 0xf6, 0x4b, 0xa6, 0x6b, 0x37, 0x99, 0xae, - 0xbd, 0xc8, 0x74, 0xed, 0x65, 0xa6, 0x6b, 0x7f, 0x65, 0xba, 0xf6, 0xeb, 0xdf, 0xfa, 0xda, 0xf7, - 0xeb, 0xb3, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x13, 0xdb, 0x98, 0xf9, 0xb8, 0x08, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 039149da..75de45f1 100644 --- 
a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -32,17 +32,17 @@ option go_package = "v1"; // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobSpec spec = 2; // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobStatus status = 3; } @@ -75,7 +75,7 @@ message JobCondition { // JobList is a collection of jobs. message JobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 8dad9043..646a3cd7 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -28,17 +28,17 @@ import ( type Job struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -49,7 +49,7 @@ type Job struct { type JobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index d8e2bdd7..0120e07d 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of Jobs.", } diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 88cb0167..beba55ac 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -75,7 +75,7 @@ func (in *JobCondition) DeepCopy() *JobCondition { func (in *JobList) DeepCopyInto(out *JobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Job, len(*in)) diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go index 43020ed0..258ff028 100644 --- a/vendor/k8s.io/api/batch/v1beta1/doc.go +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/batch/v1beta1" diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index ece2204f..837a2f9c 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -14,37 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. 
+// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -57,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJob proto.InternalMessageInfo + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CronJobList proto.InternalMessageInfo -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{2} +} +func (m *CronJobSpec) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob") @@ -89,10 +221,68 @@ func init() { proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_e57b277b05179ae7) +} + +var fileDescriptor_e57b277b05179ae7 = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, + 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, + 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, + 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, + 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, + 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, + 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, + 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, + 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, + 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, + 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, + 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, + 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, + 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, + 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, + 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, + 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, + 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, + 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, + 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, + 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, + 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, + 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, + 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, + 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, + 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, + 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, + 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, + 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 
0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, + 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, + 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, + 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, + 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, + 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, + 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, + 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, + 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, + 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, + 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, + 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, + 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, + 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, + 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, + 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, + 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, + 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, + 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, + 0x07, 0x00, 0x00, +} + func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -100,41 +290,52 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -142,37 +343,46 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -180,58 +390,67 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.Suspend != nil { - dAtA[i] = 0x20 - i++ + i-- if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x20 } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil 
{ + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -239,39 +458,48 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.LastScheduleTime != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n6 } - return i, nil + return len(dAtA) - i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -279,33 +507,42 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -313,57 +550,53 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CronJob) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -376,6 +609,9 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -390,6 +626,9 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Schedule) @@ -414,6 +653,9 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Active) > 0 { @@ -430,6 +672,9 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -440,6 +685,9 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -450,14 +698,7 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -467,7 +708,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -478,9 +719,14 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -505,9 +751,14 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -517,7 +768,7 @@ func (this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -528,8 +779,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -557,7 +808,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -585,7 +836,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -594,6 +845,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -615,7 +869,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -624,6 +878,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -645,7 +902,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -654,6 +911,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -670,6 +930,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -697,7 +960,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -725,7 +988,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -734,6 +997,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -755,7 +1021,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -764,6 +1030,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -781,6 +1050,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -808,7 +1080,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -836,7 +1108,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -846,6 +1118,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -865,7 +1140,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -885,7 +1160,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -895,6 +1170,9 @@ func (m *CronJobSpec) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -914,7 +1192,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -935,7 +1213,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -944,6 +1222,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -965,7 +1246,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -985,7 +1266,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1000,6 +1281,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1027,7 +1311,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1055,7 +1339,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1064,10 +1348,13 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + m.Active = append(m.Active, v11.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1086,7 +1373,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1095,11 +1382,14 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1114,6 +1404,9 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1141,7 +1434,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1169,7 +1462,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1178,6 +1471,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1199,7 +1495,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1208,6 +1504,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1224,6 +1523,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1251,7 +1553,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1279,7 +1581,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1288,6 +1590,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1309,7 +1614,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1318,6 +1623,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1334,6 +1642,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1400,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1432,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1450,60 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 771 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, - 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, - 0x48, 0xa8, 0x42, 0x1d, 0x8f, 
0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, - 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, - 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, - 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, - 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, - 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, - 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, - 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, - 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, - 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, - 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, - 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, - 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, - 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, - 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, - 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, - 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, - 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, - 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, - 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, - 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, - 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, - 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, - 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, - 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, - 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, - 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, - 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, - 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, - 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, - 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, - 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, - 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, - 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, - 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, - 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, - 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 
0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, - 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, - 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, - 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, - 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, - 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, - 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, - 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, - 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, - 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index 043b3551..995b4f3f 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v1beta1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -112,12 +112,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. 
- // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -125,12 +125,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index cb5c9bad..2978747a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index abbdfec0..ecc91444 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 1c8bc447..7c9dcb74 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go index f4ed01ad..3044b0c6 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 6ab41ebb..8271c841 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -14,37 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto -// DO NOT EDIT! -/* - Package v2alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ package v2alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -57,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJob proto.InternalMessageInfo + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CronJobList proto.InternalMessageInfo -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") @@ -89,10 +221,68 @@ func init() { 
proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptor_5495a0550fe29c46) +} + +var fileDescriptor_5495a0550fe29c46 = []byte{ + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, + 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, + 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, + 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, + 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, + 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, + 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, + 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, + 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, + 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, + 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, + 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, + 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, + 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, + 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, + 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, + 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, + 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, + 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, + 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, + 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, + 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, + 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, + 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, + 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, + 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, + 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, + 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, + 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, + 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, + 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, + 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, + 
0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, + 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, + 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, + 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, + 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, + 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, + 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, + 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, + 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, + 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, + 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, + 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, + 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, + 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, + 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, + 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, + 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, +} + func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -100,41 +290,52 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -142,37 +343,46 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m 
*CronJobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -180,58 +390,67 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.Suspend != nil { - dAtA[i] = 0x20 - i++ + i-- if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x20 } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -239,39 +458,48 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.LastScheduleTime != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n6 } - return i, nil + return len(dAtA) - i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -279,33 +507,42 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -313,57 +550,53 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CronJob) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -376,6 +609,9 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -390,6 +626,9 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Schedule) @@ -414,6 +653,9 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Active) > 0 { @@ -430,6 +672,9 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -440,6 +685,9 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -450,14 +698,7 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -467,7 +708,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -478,9 +719,14 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CronJob{" + for _, f := range 
this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -505,9 +751,14 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -517,7 +768,7 @@ func (this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -528,8 +779,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -557,7 +808,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -585,7 +836,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -594,6 +845,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -615,7 +869,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 
{ break } @@ -624,6 +878,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -645,7 +902,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -654,6 +911,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -670,6 +930,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -697,7 +960,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -725,7 +988,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -734,6 +997,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -755,7 +1021,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -764,6 +1030,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -781,6 +1050,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -808,7 +1080,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -836,7 +1108,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -846,6 +1118,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -865,7 +1140,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -885,7 +1160,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -895,6 +1170,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -914,7 +1192,7 @@ func 
(m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -935,7 +1213,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -944,6 +1222,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -965,7 +1246,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -985,7 +1266,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1000,6 +1281,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1027,7 +1311,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1055,7 +1339,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1064,10 +1348,13 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + m.Active = append(m.Active, v11.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1086,7 +1373,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1095,11 +1382,14 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1114,6 +1404,9 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1141,7 +1434,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1169,7 +1462,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1178,6 +1471,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1199,7 +1495,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1208,6 +1504,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1224,6 +1523,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1251,7 +1553,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1279,7 +1581,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1288,6 +1590,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1309,7 +1614,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1318,6 +1623,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1334,6 +1642,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1400,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1432,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1450,60 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, - 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, - 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, - 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, - 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, - 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 
0xa3, 0xff, 0xff, 0xc7, 0x87, - 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, - 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, - 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, - 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, - 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, - 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, - 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, - 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, - 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, - 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, - 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, - 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, - 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, - 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, - 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, - 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, - 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, - 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, - 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, - 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, - 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, - 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, - 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, - 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, - 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, - 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, - 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, - 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, - 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, - 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, - 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, - 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, - 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, - 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, - 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, - 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 
0x70, - 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, - 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, - 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, - 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, - 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, - 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, - 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 4321c336..0bba13b8 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v2alpha1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -110,12 +110,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -123,12 +123,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go index cccff94f..465e614a 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index f448a92c..bc80eca4 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v2alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index 20d87e7e..1b03f674 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index fb23aadb..9055248b 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=certificates.k8s.io + package v1beta1 // import "k8s.io/api/certificates/v1beta1" diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index eda15990..2e61b568 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -14,36 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto - - It has these top-level messages: - CertificateSigningRequest - CertificateSigningRequestCondition - CertificateSigningRequestList - CertificateSigningRequestSpec - CertificateSigningRequestStatus - ExtraValue -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -59,49 +47,244 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_09d156762b8218ef, []int{0} +} +func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequest.Merge(m, src) +} +func (m *CertificateSigningRequest) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequest.DiscardUnknown(m) } +var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo + func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_09d156762b8218ef, []int{1} +} +func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestCondition.Merge(m, src) } +func (m *CertificateSigningRequestCondition) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptor_09d156762b8218ef, []int{2} +} +func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*CertificateSigningRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestList.Merge(m, src) +} +func (m *CertificateSigningRequestList) XXX_Size() int { + return m.Size() } +func (m *CertificateSigningRequestList) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestList.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptor_09d156762b8218ef, []int{3} +} +func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestSpec.Merge(m, src) +} +func (m *CertificateSigningRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestSpec.DiscardUnknown(m) } +var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo + func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_09d156762b8218ef, []int{4} +} +func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestStatus.Merge(m, src) +} +func (m *CertificateSigningRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{5} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) } -func (m *ExtraValue) Reset() { *m = 
ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func init() { proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry") proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_09d156762b8218ef) +} + +var fileDescriptor_09d156762b8218ef = []byte{ + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4b, 0x8f, 0x1b, 0x45, + 0x10, 0xf6, 0xf8, 0xb5, 0x76, 0x7b, 0xd9, 0x44, 0x2d, 0x14, 0x0d, 0x2b, 0x65, 0x66, 0x35, 0x02, + 0xb4, 0x3c, 0xd2, 0xc3, 0x46, 0x08, 0x56, 0x7b, 0x40, 0x30, 0x4b, 0x04, 0x2b, 0x12, 0x21, 0x75, + 0x62, 0x0e, 0x08, 0x89, 0xb4, 0xc7, 0x95, 0x71, 0xc7, 0x99, 0x07, 0xd3, 0x3d, 0x06, 0xdf, 0xf2, + 0x13, 0x38, 0x72, 0x41, 0xe2, 0x97, 0x70, 0x5e, 0x0e, 0x48, 0x39, 0xe6, 0x80, 0x2c, 0xd6, 0xfc, + 0x8b, 0x9c, 0x50, 0xf7, 0xb4, 0x3d, 0xc6, 0x2b, 0xe3, 0x28, 0x7b, 0x9b, 0xfa, 0xaa, 0xbe, 0xaf, + 0x1e, 0x5d, 0x35, 0xe8, 0xcb, 0xf1, 0xb1, 0x20, 0x3c, 0xf5, 0xc7, 0xc5, 0x00, 0xf2, 0x04, 0x24, + 0x08, 0x7f, 0x02, 0xc9, 0x30, 0xcd, 0x7d, 0xe3, 0x60, 0x19, 0xf7, 0x43, 0xc8, 0x25, 0x7f, 0xc4, + 0x43, 0xa6, 0xdd, 0x47, 0x03, 0x90, 0xec, 0xc8, 0x8f, 0x20, 0x81, 0x9c, 0x49, 0x18, 0x92, 0x2c, + 0x4f, 0x65, 0x8a, 0xdd, 0x92, 0x40, 0x58, 0xc6, 0xc9, 0x2a, 0x81, 0x18, 0xc2, 0xfe, 0xad, 0x88, + 0xcb, 0x51, 0x31, 0x20, 0x61, 0x1a, 0xfb, 0x51, 0x1a, 0xa5, 0xbe, 0xe6, 0x0d, 0x8a, 0x47, 0xda, + 0xd2, 0x86, 0xfe, 0x2a, 0xf5, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, + 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x58, + 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc2, 0x47, 0xdb, 0x08, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x79, + 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, + 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, + 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x8a, 0x26, 0x93, 0x23, + 0xf2, 0xf5, 0xe0, 0x31, 0x84, 0xf2, 0x1e, 0x48, 0x16, 0xe0, 0xf3, 0x99, 0x5b, 0x9b, 0xcf, 0x5c, + 0x54, 0x61, 0x74, 0xa9, 0x8a, 0x1f, 0xa2, 0xa6, 0xc8, 0x20, 0xb4, 0xeb, 0x5a, 0xfd, 0x13, 0xb2, + 0x65, 0xfa, 0x64, 0x63, 0xad, 0xf7, 0x33, 0x08, 0x83, 0x5d, 0x93, 0xab, 0xa9, 0x2c, 0xaa, 0x95, + 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4e, + 0xb0, 0x67, 0xb2, 0xb4, 0x4b, 0x9b, 0x1a, 0x7d, 0xef, 0xd7, 0x3a, 0xf2, 0x36, 0x72, 0x4f, 0xd3, + 0x64, 0xc8, 0x25, 0x4f, 0x13, 0x7c, 0x8c, 0x9a, 0x72, 0x9a, 0x81, 0x1e, 0x68, 0x37, 0x78, 
0x73, + 0x51, 0xf2, 0x83, 0x69, 0x06, 0x2f, 0x66, 0xee, 0xeb, 0xeb, 0xf1, 0x0a, 0xa7, 0x9a, 0x81, 0xdf, + 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x5e, 0xfc, + 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0x81, 0x3b, 0xf7, 0x4a, + 0x98, 0x2e, 0xfc, 0xf8, 0x31, 0xda, 0x7b, 0xc2, 0x84, 0xec, 0x67, 0x43, 0x26, 0xe1, 0x01, 0x8f, + 0xc1, 0x6e, 0xea, 0x29, 0xbd, 0xfb, 0x72, 0xef, 0xac, 0x18, 0xc1, 0x0d, 0xa3, 0xbe, 0x77, 0xf7, + 0x3f, 0x4a, 0x74, 0x4d, 0xd9, 0x9b, 0x59, 0xe8, 0xe6, 0xc6, 0xf9, 0xdc, 0xe5, 0x42, 0xe2, 0xef, + 0x2e, 0xed, 0x1b, 0x79, 0xb9, 0x3a, 0x14, 0x5b, 0x6f, 0xdb, 0x75, 0x53, 0x4b, 0x67, 0x81, 0xac, + 0xec, 0xda, 0xf7, 0xa8, 0xc5, 0x25, 0xc4, 0xc2, 0xae, 0x1f, 0x34, 0x0e, 0x7b, 0xb7, 0x4f, 0x5e, + 0x7d, 0x11, 0x82, 0xd7, 0x4c, 0x9a, 0xd6, 0x99, 0x12, 0xa4, 0xa5, 0xae, 0xf7, 0x7b, 0xe3, 0x7f, + 0x1a, 0x54, 0x2b, 0x89, 0xdf, 0x42, 0x3b, 0x79, 0x69, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0x7a, 0x15, + 0x13, 0x41, 0x17, 0x3e, 0x4c, 0x50, 0xbb, 0x50, 0xcf, 0x23, 0xec, 0xd6, 0x41, 0xe3, 0xb0, 0x1b, + 0xdc, 0x50, 0x8f, 0xdc, 0xd7, 0xc8, 0x8b, 0x99, 0xdb, 0xf9, 0x0a, 0xa6, 0xda, 0xa0, 0x26, 0x0a, + 0xbf, 0x8f, 0x3a, 0x85, 0x80, 0x3c, 0x61, 0x31, 0x98, 0xd5, 0x58, 0xce, 0xa1, 0x6f, 0x70, 0xba, + 0x8c, 0xc0, 0x37, 0x51, 0xa3, 0xe0, 0x43, 0xb3, 0x1a, 0x3d, 0x13, 0xd8, 0xe8, 0x9f, 0x7d, 0x4e, + 0x15, 0x8e, 0x3d, 0xd4, 0x8e, 0xf2, 0xb4, 0xc8, 0x84, 0xdd, 0xd4, 0xc9, 0x91, 0x4a, 0xfe, 0x85, + 0x46, 0xa8, 0xf1, 0xe0, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, + 0xbb, 0x25, 0x77, 0x94, 0xd6, 0x9d, 0x44, 0xe6, 0xd3, 0x6a, 0xb2, 0x1a, 0xa3, 0x65, 0x9a, 0x7d, + 0x40, 0xa8, 0x8a, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x80, 0xa8, 0xfa, 0xc4, 0x9f, 0xa1, + 0xd6, 0x84, 0x3d, 0x29, 0xc0, 0xfc, 0x47, 0xde, 0xdb, 0x5a, 0x8f, 0x56, 0xfb, 0x46, 0x51, 0x68, + 0xc9, 0x3c, 0xa9, 0x1f, 0x5b, 0xde, 0x9f, 0x16, 0x72, 0xb7, 0x5c, 0x3f, 0xfe, 0x11, 0xa1, 0x70, + 0x71, 0x9b, 0xc2, 0xb6, 0x74, 0xff, 0xa7, 0xaf, 0xde, 0xff, 0xf2, 0xce, 0xab, 0x1f, 0xe5, 0x12, + 0x12, 0x74, 0x25, 0x15, 0x3e, 0x42, 0xbd, 0x15, 0x69, 0xdd, 0xe9, 0x6e, 0x70, 0x6d, 0x3e, 0x73, + 0x7b, 0x2b, 0xe2, 0x74, 0x35, 0xc6, 0xfb, 0xd8, 0x8c, 0x4d, 0x37, 0x8a, 0xdd, 0xc5, 0xfe, 0x5b, + 0xfa, 0x5d, 0xbb, 0xeb, 0xfb, 0x7b, 0xd2, 0xf9, 0xe5, 0x37, 0xb7, 0xf6, 0xf4, 0xaf, 0x83, 0x5a, + 0x70, 0xeb, 0xfc, 0xc2, 0xa9, 0x3d, 0xbb, 0x70, 0x6a, 0xcf, 0x2f, 0x9c, 0xda, 0xd3, 0xb9, 0x63, + 0x9d, 0xcf, 0x1d, 0xeb, 0xd9, 0xdc, 0xb1, 0x9e, 0xcf, 0x1d, 0xeb, 0xef, 0xb9, 0x63, 0xfd, 0xfc, + 0x8f, 0x53, 0xfb, 0x76, 0xc7, 0x74, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x39, 0x0e, 0xb6, + 0xcd, 0x7f, 0x07, 0x00, 0x00, +} + func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -109,41 +292,52 @@ func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -151,37 +345,47 @@ func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) } func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -189,37 +393,46 @@ func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -227,92 +440,86 @@ func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) - i += copy(dAtA[i:], m.Request) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Usages) > 0 { - for _, s := range m.Usages { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x32 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n6, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 } } - return i, nil + if len(m.Usages) > 0 { + for iNdEx := len(m.Usages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Usages[iNdEx]) + copy(dAtA[i:], m.Usages[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Usages[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x1a + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + if m.Request != nil { + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -320,35 +527,43 @@ func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.Certificate != nil { + i -= len(m.Certificate) + copy(dAtA[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i-- + dAtA[i] = 0x12 + } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - if m.Certificate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) - i += copy(dAtA[i:], m.Certificate) - } - return i, nil + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -356,56 +571,42 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = 
uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CertificateSigningRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -418,6 +619,9 @@ func (m *CertificateSigningRequest) Size() (n int) { } func (m *CertificateSigningRequestCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -432,6 +636,9 @@ func (m *CertificateSigningRequestCondition) Size() (n int) { } func (m *CertificateSigningRequestList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -446,6 +653,9 @@ func (m *CertificateSigningRequestList) Size() (n int) { } func (m *CertificateSigningRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -481,6 +691,9 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { } func (m *CertificateSigningRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -497,6 +710,9 @@ func (m *CertificateSigningRequestStatus) Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -509,14 +725,7 @@ func (m ExtraValue) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -526,7 +735,7 @@ func (this *CertificateSigningRequest) String() string { return "nil" } s := strings.Join([]string{`&CertificateSigningRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -541,7 +750,7 @@ func (this *CertificateSigningRequestCondition) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -550,9 +759,14 @@ func (this *CertificateSigningRequestList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CertificateSigningRequest{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CertificateSigningRequestList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -586,8 +800,13 @@ func (this *CertificateSigningRequestStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]CertificateSigningRequestCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&CertificateSigningRequestStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, `}`, }, "") @@ -616,7 +835,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -644,7 +863,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -653,6 +872,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -674,7 +896,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -683,6 +905,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -704,7 +929,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -713,6 +938,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -729,6 +957,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -756,7 +987,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -784,7 +1015,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -794,6 +1025,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -813,7 +1047,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -823,6 +1057,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -842,7 +1079,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -852,6 +1089,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -871,7 +1111,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -880,6 +1120,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -896,6 +1139,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -923,7 +1169,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -951,7 +1197,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -960,6 +1206,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -981,7 +1230,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -990,6 +1239,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1007,6 +1259,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if skippy 
< 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1034,7 +1289,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1062,7 +1317,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1071,6 +1326,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1093,7 +1351,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1103,6 +1361,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1122,7 +1383,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1132,6 +1393,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1151,7 +1415,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1161,6 +1425,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1180,7 +1447,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1190,6 +1457,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1209,7 +1479,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1218,54 +1488,20 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1275,46 +1511,88 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1325,6 +1603,9 @@ func (m 
*CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1352,7 +1633,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1380,7 +1661,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1389,6 +1670,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1411,7 +1695,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1420,6 +1704,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1437,6 +1724,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1464,7 +1754,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1492,7 +1782,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1502,6 +1792,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1516,6 +1809,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1582,10 +1878,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1614,6 +1913,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1632,62 +1934,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x8f, 
0xdb, 0x44, - 0x18, 0x8e, 0xf3, 0xb5, 0xc9, 0x64, 0xd9, 0x56, 0x23, 0x54, 0x99, 0x95, 0x6a, 0xaf, 0x2c, 0x40, - 0xcb, 0x47, 0xc7, 0x6c, 0x85, 0x60, 0xb5, 0x07, 0x04, 0x5e, 0x2a, 0x58, 0xd1, 0x0a, 0x69, 0xda, - 0x70, 0x40, 0x48, 0x74, 0xe2, 0xbc, 0x75, 0xa6, 0xa9, 0x3f, 0xf0, 0x8c, 0x03, 0xb9, 0xf5, 0x27, - 0x70, 0xe4, 0x82, 0xc4, 0x2f, 0xe1, 0xbc, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x45, 0x6c, 0xf8, 0x17, - 0x3d, 0xa1, 0x19, 0x4f, 0xe2, 0x90, 0x55, 0x48, 0xd5, 0xbd, 0x79, 0x9e, 0xf7, 0x79, 0x9e, 0xf7, - 0x63, 0xde, 0x31, 0xfa, 0x72, 0x7c, 0x2c, 0x08, 0x4f, 0xfd, 0x71, 0x31, 0x80, 0x3c, 0x01, 0x09, - 0xc2, 0x9f, 0x40, 0x32, 0x4c, 0x73, 0xdf, 0x04, 0x58, 0xc6, 0xfd, 0x10, 0x72, 0xc9, 0x1f, 0xf1, - 0x90, 0xe9, 0xf0, 0xd1, 0x00, 0x24, 0x3b, 0xf2, 0x23, 0x48, 0x20, 0x67, 0x12, 0x86, 0x24, 0xcb, - 0x53, 0x99, 0x62, 0xb7, 0x14, 0x10, 0x96, 0x71, 0xb2, 0x2a, 0x20, 0x46, 0xb0, 0x7f, 0x2b, 0xe2, - 0x72, 0x54, 0x0c, 0x48, 0x98, 0xc6, 0x7e, 0x94, 0x46, 0xa9, 0xaf, 0x75, 0x83, 0xe2, 0x91, 0x3e, - 0xe9, 0x83, 0xfe, 0x2a, 0xfd, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, - 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x54, - 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc1, 0x47, 0xdb, 0x04, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x75, - 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, - 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, - 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x62, 0x93, 0xc9, 0x11, - 0xf9, 0x7a, 0xf0, 0x18, 0x42, 0x79, 0x0f, 0x24, 0x0b, 0xf0, 0xf9, 0xcc, 0xad, 0xcd, 0x67, 0x2e, - 0xaa, 0x30, 0xba, 0x74, 0xc5, 0x0f, 0x51, 0x53, 0x64, 0x10, 0xda, 0x75, 0xed, 0xfe, 0x09, 0xd9, - 0x32, 0x7d, 0xb2, 0xb1, 0xd6, 0xfb, 0x19, 0x84, 0xc1, 0xae, 0xc9, 0xd5, 0x54, 0x27, 0xaa, 0x9d, - 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4f, - 0xb0, 0x67, 0xb2, 0xb4, 0xcb, 0x33, 0x35, 0xfe, 0xde, 0xaf, 0x75, 0xe4, 0x6d, 0xd4, 0x9e, 0xa6, - 0xc9, 0x90, 0x4b, 0x9e, 0x26, 0xf8, 0x18, 0x35, 0xe5, 0x34, 0x03, 0x3d, 0xd0, 0x6e, 0xf0, 0xe6, - 0xa2, 0xe4, 0x07, 0xd3, 0x0c, 0x5e, 0xcc, 0xdc, 0xd7, 0xd7, 0xf9, 0x0a, 0xa7, 0x5a, 0x81, 0xdf, - 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x51, 0xfc, - 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0xc4, 0x9d, 0x7b, 0x25, - 0x4c, 0x17, 0x71, 0xfc, 0x18, 0xed, 0x3d, 0x61, 0x42, 0xf6, 0xb3, 0x21, 0x93, 0xf0, 0x80, 0xc7, - 0x60, 0x37, 0xf5, 0x94, 0xde, 0x7d, 0xb9, 0x7b, 0x56, 0x8a, 0xe0, 0x86, 0x71, 0xdf, 0xbb, 0xfb, - 0x1f, 0x27, 0xba, 0xe6, 0xec, 0xcd, 0x2c, 0x74, 0x73, 0xe3, 0x7c, 0xee, 0x72, 0x21, 0xf1, 0x77, - 0x97, 0xf6, 0x8d, 0xbc, 0x5c, 0x1d, 0x4a, 0xad, 0xb7, 0xed, 0xba, 0xa9, 0xa5, 0xb3, 0x40, 0x56, - 0x76, 0xed, 0x7b, 0xd4, 0xe2, 0x12, 0x62, 0x61, 0xd7, 0x0f, 0x1a, 0x87, 0xbd, 0xdb, 0x27, 0xaf, - 0xbe, 0x08, 0xc1, 0x6b, 0x26, 0x4d, 0xeb, 0x4c, 0x19, 0xd2, 0xd2, 0xd7, 0xfb, 0xbd, 0xf1, 0x3f, - 0x0d, 0xaa, 0x95, 0xc4, 0x6f, 0xa1, 0x9d, 0xbc, 0x3c, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0xba, 0x15, - 0xc3, 0xa0, 0x8b, 0x18, 0x7e, 0x1f, 0x75, 0x0a, 0x01, 0x79, 0xc2, 0x62, 0x30, 0x57, 0xbd, 0xec, - 0xab, 0x6f, 0x70, 0xba, 0x64, 0xe0, 0x9b, 0xa8, 0x51, 0xf0, 0xa1, 0xb9, 0xea, 0x9e, 0x21, 0x36, - 0xfa, 0x67, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x0f, 0x1a, - 0x87, 0xdd, 0x00, 0xa9, 0x8d, 0xf9, 0x42, 0x23, 0xd4, 0x44, 0x30, 0x41, 0xed, 0x42, 0xed, 0x83, - 0xb0, 0x5b, 
0x9a, 0x73, 0x43, 0x71, 0xfa, 0x1a, 0x79, 0x31, 0x73, 0x3b, 0x5f, 0xc1, 0x54, 0x1f, - 0xa8, 0x61, 0xe1, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, 0x77, - 0x4b, 0xee, 0x28, 0xaf, 0x3b, 0x89, 0xcc, 0xa7, 0xd5, 0x64, 0x35, 0x46, 0xcb, 0x34, 0xfb, 0x80, - 0x50, 0xc5, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x01, 0x51, 0xf5, 0x89, 0x3f, 0x43, 0xad, - 0x09, 0x7b, 0x52, 0x80, 0xf9, 0x8f, 0xbc, 0xb7, 0xb5, 0x1e, 0xed, 0xf6, 0x8d, 0x92, 0xd0, 0x52, - 0x79, 0x52, 0x3f, 0xb6, 0xbc, 0x3f, 0x2d, 0xe4, 0x6e, 0x79, 0xfd, 0xf8, 0x47, 0x84, 0xc2, 0xc5, - 0xdb, 0x14, 0xb6, 0xa5, 0xfb, 0x3f, 0x7d, 0xf5, 0xfe, 0x97, 0xef, 0xbc, 0xfa, 0x51, 0x2e, 0x21, - 0x41, 0x57, 0x52, 0xe1, 0x23, 0xd4, 0x5b, 0xb1, 0xd6, 0x9d, 0xee, 0x06, 0xd7, 0xe6, 0x33, 0xb7, - 0xb7, 0x62, 0x4e, 0x57, 0x39, 0xde, 0xc7, 0x66, 0x6c, 0xba, 0x51, 0xec, 0x2e, 0xf6, 0xdf, 0xd2, - 0x77, 0xdc, 0x5d, 0xdf, 0xdf, 0x93, 0xce, 0x2f, 0xbf, 0xb9, 0xb5, 0xa7, 0x7f, 0x1d, 0xd4, 0x82, - 0x5b, 0xe7, 0x17, 0x4e, 0xed, 0xd9, 0x85, 0x53, 0x7b, 0x7e, 0xe1, 0xd4, 0x9e, 0xce, 0x1d, 0xeb, - 0x7c, 0xee, 0x58, 0xcf, 0xe6, 0x8e, 0xf5, 0x7c, 0xee, 0x58, 0x7f, 0xcf, 0x1d, 0xeb, 0xe7, 0x7f, - 0x9c, 0xda, 0xb7, 0x3b, 0xa6, 0xbb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x6b, 0x5b, 0xf9, - 0x7f, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index bb9e82d3..93f81cd5 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -129,27 +129,27 @@ type CertificateSigningRequestList struct { type KeyUsage string const ( - UsageSigning KeyUsage = "signing" - UsageDigitalSignature KeyUsage = "digital signature" - UsageContentCommittment KeyUsage = "content commitment" - UsageKeyEncipherment KeyUsage = "key encipherment" - UsageKeyAgreement KeyUsage = "key agreement" - UsageDataEncipherment KeyUsage = "data encipherment" - UsageCertSign KeyUsage = "cert sign" - UsageCRLSign KeyUsage = "crl sign" - UsageEncipherOnly KeyUsage = "encipher only" - UsageDecipherOnly KeyUsage = "decipher only" - UsageAny KeyUsage = "any" - UsageServerAuth KeyUsage = "server auth" - UsageClientAuth KeyUsage = "client auth" - UsageCodeSigning KeyUsage = "code signing" - UsageEmailProtection KeyUsage = "email protection" - UsageSMIME KeyUsage = "s/mime" - UsageIPsecEndSystem KeyUsage = "ipsec end system" - UsageIPsecTunnel KeyUsage = "ipsec tunnel" - UsageIPsecUser KeyUsage = "ipsec user" - UsageTimestamping KeyUsage = "timestamping" - UsageOCSPSigning KeyUsage = "ocsp signing" - UsageMicrosoftSGC KeyUsage = "microsoft sgc" - UsageNetscapSGC KeyUsage = "netscape sgc" + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommitment KeyUsage = "content commitment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping 
KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapeSGC KeyUsage = "netscape sgc" ) diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 1b103f15..b3e0aeb5 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -73,7 +73,7 @@ func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequ func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CertificateSigningRequest, len(*in)) diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go new file mode 100644 index 00000000..fc2f4f2c --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=coordination.k8s.io + +package v1 // import "k8s.io/api/coordination/v1" diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go new file mode 100644 index 00000000..7e78be19 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -0,0 +1,1002 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo + +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease") + proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList") + proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptor_929e1148ad9baca3) +} + +var fileDescriptor_929e1148ad9baca3 = []byte{ + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, + 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, + 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, + 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, + 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, + 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, + 
0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, + 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, + 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, + 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, + 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, + 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, + 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, + 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, + 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, + 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, + 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, + 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, + 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, + 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, + 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, + 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, + 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, + 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, + 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, + 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, + 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, + 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, + 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, + 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, + 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, + 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, + 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, +} + +func (m *Lease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Lease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 + } + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AcquireTime != nil { + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 + } + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HolderIdentity != nil { + l = len(*m.HolderIdentity) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LeaseDurationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.LeaseDurationSeconds)) + } + if m.AcquireTime != nil { + l = m.AcquireTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LeaseTransitions != nil { + n += 1 + 
sovGenerated(uint64(*m.LeaseTransitions)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Lease) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lease{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaseList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LeaseList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LeaseSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseSpec{`, + `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, + `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Lease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Lease{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HolderIdentity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HolderIdentity = &s + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseDurationSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LeaseDurationSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcquireTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AcquireTime == nil { + m.AcquireTime = &v1.MicroTime{} + } + if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RenewTime == nil { + m.RenewTime = &v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseTransitions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LeaseTransitions = &v + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto new file mode 100644 index 00000000..4206746d --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.api.coordination.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Lease defines a lease concept. +message Lease { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional LeaseSpec spec = 2; +} + +// LeaseList is a list of Lease objects. +message LeaseList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated Lease items = 2; +} + +// LeaseSpec is a specification of a Lease. +message LeaseSpec { + // holderIdentity contains the identity of the holder of a current lease. + // +optional + optional string holderIdentity = 1; + + // leaseDurationSeconds is a duration that candidates for a lease need + // to wait to force acquire it. This is measure against time of last + // observed RenewTime. + // +optional + optional int32 leaseDurationSeconds = 2; + + // acquireTime is a time when the current lease was acquired. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + + // renewTime is a time when the current holder of a lease has last + // updated the lease. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + + // leaseTransitions is the number of transitions of a lease between + // holders. + // +optional + optional int32 leaseTransitions = 5; +} + diff --git a/vendor/k8s.io/api/coordination/v1/register.go b/vendor/k8s.io/api/coordination/v1/register.go new file mode 100644 index 00000000..95b987b9 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "coordination.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Lease{}, + &LeaseList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go new file mode 100644 index 00000000..7a5605ac --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Lease defines a lease concept. +type Lease struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseSpec is a specification of a Lease. +type LeaseSpec struct { + // holderIdentity contains the identity of the holder of a current lease. + // +optional + HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` + // leaseDurationSeconds is a duration that candidates for a lease need + // to wait to force acquire it. This is measure against time of last + // observed RenewTime. + // +optional + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` + // acquireTime is a time when the current lease was acquired. + // +optional + AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty" protobuf:"bytes,3,opt,name=acquireTime"` + // renewTime is a time when the current holder of a lease has last + // updated the lease. 
+ // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,4,opt,name=renewTime"` + // leaseTransitions is the number of transitions of a lease between + // holders. + // +optional + LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LeaseList is a list of Lease objects. +type LeaseList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go new file mode 100644 index 00000000..0f144043 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Lease = map[string]string{ + "": "Lease defines a lease concept.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Lease) SwaggerDoc() map[string]string { + return map_Lease +} + +var map_LeaseList = map[string]string{ + "": "LeaseList is a list of Lease objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (LeaseList) SwaggerDoc() map[string]string { + return map_LeaseList +} + +var map_LeaseSpec = map[string]string{ + "": "LeaseSpec is a specification of a Lease.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed RenewTime.", + "acquireTime": "acquireTime is a time when the current lease was acquired.", + "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", + "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", +} + +func (LeaseSpec) SwaggerDoc() map[string]string { + return map_LeaseSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..2dd7eddb --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lease) DeepCopyInto(out *Lease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease. +func (in *Lease) DeepCopy() *Lease { + if in == nil { + return nil + } + out := new(Lease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Lease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseList) DeepCopyInto(out *LeaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Lease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList. +func (in *LeaseList) DeepCopy() *LeaseList { + if in == nil { + return nil + } + out := new(LeaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
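
Note on usage (an illustrative sketch, not part of the vendored files): the coordination/v1 Lease defined above is just a holder identity plus acquire/renew timestamps and a duration, and the documented semantics are that leaseDurationSeconds is measured against the last observed renewTime. A minimal Go sketch of building such an object and checking expiry, assuming only the types in this package and apimachinery's MicroTime helper; the leaseExpired helper and the example names are made up:

package main

import (
	"fmt"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// leaseExpired reports whether renewTime plus leaseDurationSeconds lies in the
// past, i.e. whether a candidate may force-acquire the lease.
func leaseExpired(l *coordinationv1.Lease, now time.Time) bool {
	spec := l.Spec
	if spec.RenewTime == nil || spec.LeaseDurationSeconds == nil {
		return true // never renewed or no duration recorded; treat as free
	}
	expiry := spec.RenewTime.Add(time.Duration(*spec.LeaseDurationSeconds) * time.Second)
	return expiry.Before(now)
}

func main() {
	holder := "node-a"                       // hypothetical holder identity
	duration := int32(15)                    // seconds a candidate must wait
	renew := metav1.NewMicroTime(time.Now()) // last observed renewal

	lease := &coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "kube-node-lease"},
		Spec: coordinationv1.LeaseSpec{
			HolderIdentity:       &holder,
			LeaseDurationSeconds: &duration,
			RenewTime:            &renew,
		},
	}

	fmt.Println("expired now?        ", leaseExpired(lease, time.Now()))
	fmt.Println("expired in 1 minute?", leaseExpired(lease, time.Now().Add(time.Minute)))
}
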
+func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { + *out = *in + if in.HolderIdentity != nil { + in, out := &in.HolderIdentity, &out.HolderIdentity + *out = new(string) + **out = **in + } + if in.LeaseDurationSeconds != nil { + in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds + *out = new(int32) + **out = **in + } + if in.AcquireTime != nil { + in, out := &in.AcquireTime, &out.AcquireTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + if in.LeaseTransitions != nil { + in, out := &in.LeaseTransitions, &out.LeaseTransitions + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec. +func (in *LeaseSpec) DeepCopy() *LeaseSpec { + if in == nil { + return nil + } + out := new(LeaseSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go index fecb513f..304732d5 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=coordination.k8s.io + package v1beta1 // import "k8s.io/api/coordination/v1beta1" diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 6c2dbd91..2463d625 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -14,33 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto - - It has these top-level messages: - Lease - LeaseList - LeaseSpec -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -53,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_daca6bcd2ff63a80) +} + +var fileDescriptor_daca6bcd2ff63a80 = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 
0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, + 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, + 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, + 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, + 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, + 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, + 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, + 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, + 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, + 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, + 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, + 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, + 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, + 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, + 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, + 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, + 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, + 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, + 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, + 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, + 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, + 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, + 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, + 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, + 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, + 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, + 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, + 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, + 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, + 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, + 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, + 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, +} + func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -81,33 +187,42 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -115,37 +230,46 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { } func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -153,77 +277,74 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { } func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -234,6 +355,9 @@ func (m *Lease) Size() (n int) { } func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -248,6 +372,9 @@ func (m *LeaseList) Size() (n int) { } func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HolderIdentity != nil { @@ -272,14 +399,7 @@ func (m *LeaseSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -289,7 +409,7 @@ func (this *Lease) String() string { return "nil" } s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -299,9 +419,14 @@ func (this *LeaseList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + 
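
For orientation (a rough standalone sketch, not code from this patch): the regenerated marshalers fill the buffer back to front. MarshalToSizedBuffer starts with i = len(dAtA), each field lowers i before storing its bytes and tag, and encodeVarintGenerated first subtracts the varint's width (sovGenerated, computed from math/bits.Len64) and then emits it. The example below reproduces that backward-varint pattern with made-up field numbers:

package main

import (
	"fmt"
	"math/bits"
)

// sov returns the encoded size of v as a protobuf varint, using the same
// formula as the generated sovGenerated helper.
func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// putVarintBackward writes v ending just before offset and returns the new
// (smaller) offset, mirroring encodeVarintGenerated above.
func putVarintBackward(buf []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = byte(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = byte(v)
	return base
}

func main() {
	// Encode field 2 (varint 300) and then field 1 (varint 1), back to front,
	// so the finished message still reads field 1 first.
	buf := make([]byte, 16)
	i := len(buf)

	i = putVarintBackward(buf, i, 300) // value of field 2
	i--
	buf[i] = 0x10 // tag: field 2, wire type 0

	i = putVarintBackward(buf, i, 1) // value of field 1
	i--
	buf[i] = 0x08 // tag: field 1, wire type 0

	fmt.Printf("% x\n", buf[i:]) // 08 01 10 ac 02
}
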
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -313,8 +438,8 @@ func (this *LeaseSpec) String() string { s := strings.Join([]string{`&LeaseSpec{`, `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") @@ -343,7 +468,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -371,7 +496,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -380,6 +505,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -401,7 +529,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -410,6 +538,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -426,6 +557,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -453,7 +587,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -481,7 +615,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -490,6 +624,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -511,7 +648,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -520,6 +657,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -537,6 +677,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -564,7 +707,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -592,7 +735,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -602,6 +745,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -622,7 +768,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -642,7 +788,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -651,11 +797,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.AcquireTime = &v1.MicroTime{} } if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -675,7 +824,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -684,11 +833,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.RenewTime = &v1.MicroTime{} } if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -708,7 +860,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -723,6 +875,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -789,10 +944,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -821,6 +979,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -839,45 +1000,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, - 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, - 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, - 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, - 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, - 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, - 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, - 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, - 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, - 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, - 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, - 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, - 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, - 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, - 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, - 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, - 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, - 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, - 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, - 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, - 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, - 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, - 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, - 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, - 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, - 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, - 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, - 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, - 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, - 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, - 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, - 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, - 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, -} diff --git 
a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 918e0de1..cfc2711c 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -30,12 +30,12 @@ option go_package = "v1beta1"; // Lease defines a lease concept. message Lease { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; } @@ -43,7 +43,7 @@ message Lease { // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 846f7280..da88f675 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -26,12 +26,12 @@ import ( // Lease defines a lease concept. type Lease struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -65,7 +65,7 @@ type LeaseSpec struct { type LeaseList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index 4532d322..f557d265 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index a628ac19..de696213 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *Lease) DeepCopyObject() runtime.Object { func (in *LeaseList) DeepCopyInto(out *LeaseList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Lease, len(*in)) diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 16a0cfce..edc9b4d6 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -78,4 +78,29 @@ const ( // // Not all cloud providers support this annotation, though AWS & GCE do. AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change. + // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). + // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" + + // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated + // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. 
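
Illustrative sketch (hypothetical helper, not part of the patch): since the trigger-time annotation above is stored as an RFC 3339 string, computing the in-cluster programming latency amounts to reading the annotation off the Endpoints object and subtracting the parsed time from the observation time:

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
)

// triggerAge returns how long ago the change that produced this Endpoints
// object was observed, based on the last-change-trigger-time annotation.
func triggerAge(ep *corev1.Endpoints, now time.Time) (time.Duration, error) {
	raw, ok := ep.Annotations[corev1.EndpointsLastChangeTriggerTime]
	if !ok {
		return 0, fmt.Errorf("annotation %q not set", corev1.EndpointsLastChangeTriggerTime)
	}
	// The annotation is documented as an RFC 3339 date-time, possibly with
	// fractional seconds, e.g. "2018-10-22T19:32:52.1Z".
	t, err := time.Parse(time.RFC3339Nano, raw)
	if err != nil {
		return 0, err
	}
	return now.Sub(t), nil
}

func main() {
	ep := &corev1.Endpoints{}
	ep.Annotations = map[string]string{
		corev1.EndpointsLastChangeTriggerTime: "2018-10-22T19:32:52.1Z",
	}
	age, err := triggerAge(ep, time.Now())
	fmt.Println(age, err)
}
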
+ // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or + // CSI Backend for a volume plugin on a specific node. + MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" ) diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 96994c62..1bdf0b25 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -16,6 +16,7 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // Package v1 is the v1 version of the core API. package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index b569ea84..732385ce 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -14,231 +14,29 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto - - It has these top-level messages: - AWSElasticBlockStoreVolumeSource - Affinity - AttachedVolume - AvoidPods - AzureDiskVolumeSource - AzureFilePersistentVolumeSource - AzureFileVolumeSource - Binding - CSIPersistentVolumeSource - Capabilities - CephFSPersistentVolumeSource - CephFSVolumeSource - CinderPersistentVolumeSource - CinderVolumeSource - ClientIPConfig - ComponentCondition - ComponentStatus - ComponentStatusList - ConfigMap - ConfigMapEnvSource - ConfigMapKeySelector - ConfigMapList - ConfigMapNodeConfigSource - ConfigMapProjection - ConfigMapVolumeSource - Container - ContainerImage - ContainerPort - ContainerState - ContainerStateRunning - ContainerStateTerminated - ContainerStateWaiting - ContainerStatus - DaemonEndpoint - DownwardAPIProjection - DownwardAPIVolumeFile - DownwardAPIVolumeSource - EmptyDirVolumeSource - EndpointAddress - EndpointPort - EndpointSubset - Endpoints - EndpointsList - EnvFromSource - EnvVar - EnvVarSource - Event - EventList - EventSeries - EventSource - ExecAction - FCVolumeSource - FlexPersistentVolumeSource - FlexVolumeSource - FlockerVolumeSource - GCEPersistentDiskVolumeSource - GitRepoVolumeSource - GlusterfsVolumeSource - HTTPGetAction - HTTPHeader - Handler - HostAlias - HostPathVolumeSource - ISCSIPersistentVolumeSource - ISCSIVolumeSource - KeyToPath - Lifecycle - LimitRange - LimitRangeItem - LimitRangeList - LimitRangeSpec - List - LoadBalancerIngress - LoadBalancerStatus - LocalObjectReference - LocalVolumeSource - NFSVolumeSource - Namespace - NamespaceList - NamespaceSpec - NamespaceStatus - Node - NodeAddress - NodeAffinity - NodeCondition - NodeConfigSource - NodeConfigStatus - NodeDaemonEndpoints - NodeList - NodeProxyOptions - NodeResources - NodeSelector - NodeSelectorRequirement - NodeSelectorTerm - NodeSpec - NodeStatus - NodeSystemInfo - ObjectFieldSelector - ObjectReference - PersistentVolume - PersistentVolumeClaim - PersistentVolumeClaimCondition - PersistentVolumeClaimList - PersistentVolumeClaimSpec - PersistentVolumeClaimStatus - PersistentVolumeClaimVolumeSource - PersistentVolumeList - PersistentVolumeSource - PersistentVolumeSpec - PersistentVolumeStatus - PhotonPersistentDiskVolumeSource - Pod - 
PodAffinity - PodAffinityTerm - PodAntiAffinity - PodAttachOptions - PodCondition - PodDNSConfig - PodDNSConfigOption - PodExecOptions - PodList - PodLogOptions - PodPortForwardOptions - PodProxyOptions - PodReadinessGate - PodSecurityContext - PodSignature - PodSpec - PodStatus - PodStatusResult - PodTemplate - PodTemplateList - PodTemplateSpec - PortworxVolumeSource - Preconditions - PreferAvoidPodsEntry - PreferredSchedulingTerm - Probe - ProjectedVolumeSource - QuobyteVolumeSource - RBDPersistentVolumeSource - RBDVolumeSource - RangeAllocation - ReplicationController - ReplicationControllerCondition - ReplicationControllerList - ReplicationControllerSpec - ReplicationControllerStatus - ResourceFieldSelector - ResourceQuota - ResourceQuotaList - ResourceQuotaSpec - ResourceQuotaStatus - ResourceRequirements - SELinuxOptions - ScaleIOPersistentVolumeSource - ScaleIOVolumeSource - ScopeSelector - ScopedResourceSelectorRequirement - Secret - SecretEnvSource - SecretKeySelector - SecretList - SecretProjection - SecretReference - SecretVolumeSource - SecurityContext - SerializedReference - Service - ServiceAccount - ServiceAccountList - ServiceAccountTokenProjection - ServiceList - ServicePort - ServiceProxyOptions - ServiceSpec - ServiceStatus - SessionAffinityConfig - StorageOSPersistentVolumeSource - StorageOSVolumeSource - Sysctl - TCPSocketAction - Taint - Toleration - TopologySelectorLabelRequirement - TopologySelectorTerm - TypedLocalObjectReference - Volume - VolumeDevice - VolumeMount - VolumeNodeAffinity - VolumeProjection - VolumeSource - VsphereVirtualDiskVolumeSource - WeightedPodAffinityTerm -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -254,13423 +52,19062 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_83c10c24ec417dc9, []int{0} } - -func (m *Affinity) Reset() { *m = Affinity{} } -func (*Affinity) ProtoMessage() {} -func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } -func (*AttachedVolume) ProtoMessage() {} -func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *AvoidPods) Reset() { *m = AvoidPods{} } -func (*AvoidPods) ProtoMessage() {} -func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } -func (*AzureDiskVolumeSource) ProtoMessage() {} -func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } -func (*AzureFilePersistentVolumeSource) ProtoMessage() {} -func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } -func (*AzureFileVolumeSource) ProtoMessage() {} -func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *Binding) Reset() { *m = Binding{} } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } -func (*CSIPersistentVolumeSource) ProtoMessage() {} -func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - -func (m *Capabilities) Reset() { *m = Capabilities{} } -func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } -func (*CephFSPersistentVolumeSource) ProtoMessage() {} -func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.Merge(m, src) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.DiscardUnknown(m) } -func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } -func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var 
xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo -func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } -func (*CinderPersistentVolumeSource) ProtoMessage() {} -func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{1} +} +func (m *Affinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Affinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Affinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Affinity.Merge(m, src) +} +func (m *Affinity) XXX_Size() int { + return m.Size() +} +func (m *Affinity) XXX_DiscardUnknown() { + xxx_messageInfo_Affinity.DiscardUnknown(m) } -func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } -func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_Affinity proto.InternalMessageInfo -func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } -func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{2} +} +func (m *AttachedVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttachedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AttachedVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachedVolume.Merge(m, src) +} +func (m *AttachedVolume) XXX_Size() int { + return m.Size() +} +func (m *AttachedVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AttachedVolume.DiscardUnknown(m) +} -func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } -func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo -func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } -func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{3} +} +func (m *AvoidPods) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AvoidPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AvoidPods) XXX_Merge(src proto.Message) { + xxx_messageInfo_AvoidPods.Merge(m, src) +} +func (m *AvoidPods) XXX_Size() int { + return m.Size() +} +func (m *AvoidPods) XXX_DiscardUnknown() { + xxx_messageInfo_AvoidPods.DiscardUnknown(m) +} -func (m *ComponentStatusList) Reset() { *m = 
ComponentStatusList{} } -func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_AvoidPods proto.InternalMessageInfo -func (m *ConfigMap) Reset() { *m = ConfigMap{} } -func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{4} +} +func (m *AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureDiskVolumeSource.Merge(m, src) +} +func (m *AzureDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureDiskVolumeSource.DiscardUnknown(m) +} -func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } -func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo -func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } -func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } +func (*AzureFilePersistentVolumeSource) ProtoMessage() {} +func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{5} +} +func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureFilePersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFilePersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFilePersistentVolumeSource.Merge(m, src) +} +func (m *AzureFilePersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFilePersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFilePersistentVolumeSource.DiscardUnknown(m) +} -func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } -func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo -func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } -func (*ConfigMapNodeConfigSource) ProtoMessage() {} -func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{22} +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{6} +} +func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) 
error { + return m.Unmarshal(b) +} +func (m *AzureFileVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFileVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFileVolumeSource.Merge(m, src) +} +func (m *AzureFileVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFileVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFileVolumeSource.DiscardUnknown(m) } -func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } -func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo -func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } -func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{7} +} +func (m *Binding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Binding) XXX_Merge(src proto.Message) { + xxx_messageInfo_Binding.Merge(m, src) +} +func (m *Binding) XXX_Size() int { + return m.Size() +} +func (m *Binding) XXX_DiscardUnknown() { + xxx_messageInfo_Binding.DiscardUnknown(m) +} -func (m *Container) Reset() { *m = Container{} } -func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +var xxx_messageInfo_Binding proto.InternalMessageInfo -func (m *ContainerImage) Reset() { *m = ContainerImage{} } -func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{8} +} +func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIPersistentVolumeSource.Merge(m, src) +} +func (m *CSIPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIPersistentVolumeSource.DiscardUnknown(m) +} -func (m *ContainerPort) Reset() { *m = ContainerPort{} } -func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo -func (m *ContainerState) Reset() { *m = ContainerState{} } -func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{28} } +func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } +func (*CSIVolumeSource) ProtoMessage() {} +func (*CSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{9} +} +func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIVolumeSource.Merge(m, src) +} +func (m *CSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIVolumeSource.DiscardUnknown(m) +} -func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } -func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo -func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } -func (*ContainerStateTerminated) ProtoMessage() {} -func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{10} +} +func (m *Capabilities) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Capabilities) XXX_Merge(src proto.Message) { + xxx_messageInfo_Capabilities.Merge(m, src) +} +func (m *Capabilities) XXX_Size() int { + return m.Size() +} +func (m *Capabilities) XXX_DiscardUnknown() { + xxx_messageInfo_Capabilities.DiscardUnknown(m) } -func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } -func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +var xxx_messageInfo_Capabilities proto.InternalMessageInfo -func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } -func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } +func (*CephFSPersistentVolumeSource) ProtoMessage() {} +func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{11} +} +func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSPersistentVolumeSource.Merge(m, src) +} +func (m *CephFSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSPersistentVolumeSource.DiscardUnknown(m) +} -func (m 
*DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } -func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo -func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } -func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{12} +} +func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSVolumeSource.Merge(m, src) +} +func (m *CephFSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSVolumeSource.DiscardUnknown(m) +} -func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } -func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo -func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } -func (*DownwardAPIVolumeSource) ProtoMessage() {} -func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{36} +func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } +func (*CinderPersistentVolumeSource) ProtoMessage() {} +func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{13} +} +func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderPersistentVolumeSource.Merge(m, src) +} +func (m *CinderPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderPersistentVolumeSource.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } -func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo -func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } -func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{14} +} +func (m *CinderVolumeSource) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderVolumeSource.Merge(m, src) +} +func (m *CinderVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderVolumeSource.DiscardUnknown(m) +} -func (m *EndpointPort) Reset() { *m = EndpointPort{} } -func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo -func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } -func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } +func (*ClientIPConfig) ProtoMessage() {} +func (*ClientIPConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{15} +} +func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClientIPConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientIPConfig.Merge(m, src) +} +func (m *ClientIPConfig) XXX_Size() int { + return m.Size() +} +func (m *ClientIPConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClientIPConfig.DiscardUnknown(m) +} -func (m *Endpoints) Reset() { *m = Endpoints{} } -func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo -func (m *EndpointsList) Reset() { *m = EndpointsList{} } -func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{16} +} +func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentCondition.Merge(m, src) +} +func (m *ComponentCondition) XXX_Size() int { + return m.Size() +} +func (m *ComponentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentCondition.DiscardUnknown(m) +} -func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } -func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo -func (m *EnvVar) Reset() { *m = EnvVar{} } -func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func 
(*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{17} +} +func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatus.Merge(m, src) +} +func (m *ComponentStatus) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatus.DiscardUnknown(m) +} -func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } -func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{18} +} +func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatusList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatusList.Merge(m, src) +} +func (m *ComponentStatusList) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatusList) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatusList.DiscardUnknown(m) +} -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{19} +} +func (m *ConfigMap) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMap.Merge(m, src) +} +func (m *ConfigMap) XXX_Size() int { + return m.Size() +} +func (m *ConfigMap) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMap.DiscardUnknown(m) +} -func (m *EventSource) Reset() { *m = EventSource{} } -func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +var xxx_messageInfo_ConfigMap proto.InternalMessageInfo -func (m *ExecAction) Reset() { *m = ExecAction{} } -func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} 
} +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } +func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{20} +} +func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapEnvSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapEnvSource.Merge(m, src) +} +func (m *ConfigMapEnvSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapEnvSource.DiscardUnknown(m) +} -func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } -func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo -func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } -func (*FlexPersistentVolumeSource) ProtoMessage() {} -func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{52} +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{21} +} +func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapKeySelector.Merge(m, src) +} +func (m *ConfigMapKeySelector) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapKeySelector.DiscardUnknown(m) } -func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } -func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } - -func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } -func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo -func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } -func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} -func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{55} +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{22} +} +func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapList.Merge(m, src) +} +func (m 
*ConfigMapList) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapList) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapList.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } -func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo -func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } -func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } +func (*ConfigMapNodeConfigSource) ProtoMessage() {} +func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{23} +} +func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapNodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapNodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapNodeConfigSource.Merge(m, src) +} +func (m *ConfigMapNodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapNodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapNodeConfigSource.DiscardUnknown(m) +} -func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } -func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo -func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } -func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{24} +} +func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapProjection.Merge(m, src) +} +func (m *ConfigMapProjection) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapProjection.DiscardUnknown(m) +} -func (m *Handler) Reset() { *m = Handler{} } -func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo -func (m *HostAlias) Reset() { *m = HostAlias{} } -func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{25} +} +func (m *ConfigMapVolumeSource) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapVolumeSource.Merge(m, src) +} +func (m *ConfigMapVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapVolumeSource.DiscardUnknown(m) +} -func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } -func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo -func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } -func (*ISCSIPersistentVolumeSource) ProtoMessage() {} -func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{63} +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{26} +} +func (m *Container) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Container) XXX_Merge(src proto.Message) { + xxx_messageInfo_Container.Merge(m, src) +} +func (m *Container) XXX_Size() int { + return m.Size() +} +func (m *Container) XXX_DiscardUnknown() { + xxx_messageInfo_Container.DiscardUnknown(m) } -func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } -func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } - -func (m *KeyToPath) Reset() { *m = KeyToPath{} } -func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +var xxx_messageInfo_Container proto.InternalMessageInfo -func (m *Lifecycle) Reset() { *m = Lifecycle{} } -func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{27} +} +func (m *ContainerImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerImage.Merge(m, src) +} +func (m *ContainerImage) XXX_Size() int { + return m.Size() +} +func (m *ContainerImage) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerImage.DiscardUnknown(m) +} -func (m *LimitRange) Reset() { *m = LimitRange{} } -func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +var xxx_messageInfo_ContainerImage proto.InternalMessageInfo -func (m *LimitRangeItem) Reset() { *m = 
LimitRangeItem{} } -func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{28} +} +func (m *ContainerPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerPort.Merge(m, src) +} +func (m *ContainerPort) XXX_Size() int { + return m.Size() +} +func (m *ContainerPort) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerPort.DiscardUnknown(m) +} -func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } -func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +var xxx_messageInfo_ContainerPort proto.InternalMessageInfo -func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } -func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{29} +} +func (m *ContainerState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerState.Merge(m, src) +} +func (m *ContainerState) XXX_Size() int { + return m.Size() +} +func (m *ContainerState) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerState.DiscardUnknown(m) +} -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +var xxx_messageInfo_ContainerState proto.InternalMessageInfo -func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } -func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateRunning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateRunning.Merge(m, src) +} +func (m *ContainerStateRunning) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateRunning) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m) +} -func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } -func (*LoadBalancerStatus) 
ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo -func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } -func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (*ContainerStateTerminated) ProtoMessage() {} +func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{31} +} +func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateTerminated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateTerminated) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateTerminated.Merge(m, src) +} +func (m *ContainerStateTerminated) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateTerminated) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateTerminated.DiscardUnknown(m) +} -func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } -func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo -func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } -func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (*ContainerStateWaiting) ProtoMessage() {} +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{32} +} +func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateWaiting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateWaiting) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateWaiting.Merge(m, src) +} +func (m *ContainerStateWaiting) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateWaiting) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateWaiting.DiscardUnknown(m) +} -func (m *Namespace) Reset() { *m = Namespace{} } -func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo -func (m *NamespaceList) Reset() { *m = NamespaceList{} } -func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{33} +} +func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *ContainerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatus.Merge(m, src) +} +func (m *ContainerStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatus.DiscardUnknown(m) +} -func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } -func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo -func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } -func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (*DaemonEndpoint) ProtoMessage() {} +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{34} +} +func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonEndpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonEndpoint.Merge(m, src) +} +func (m *DaemonEndpoint) XXX_Size() int { + return m.Size() +} +func (m *DaemonEndpoint) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonEndpoint.DiscardUnknown(m) +} -func (m *Node) Reset() { *m = Node{} } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo -func (m *NodeAddress) Reset() { *m = NodeAddress{} } -func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{35} +} +func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIProjection.Merge(m, src) +} +func (m *DownwardAPIProjection) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIProjection) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIProjection.DiscardUnknown(m) +} -func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } -func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo -func (m *NodeCondition) Reset() { *m = NodeCondition{} } -func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (*DownwardAPIVolumeFile) ProtoMessage() {} +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{36} +} +func (m 
*DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIVolumeFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIVolumeFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIVolumeFile.Merge(m, src) +} +func (m *DownwardAPIVolumeFile) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIVolumeFile) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIVolumeFile.DiscardUnknown(m) +} -func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } -func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo -func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } -func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (*DownwardAPIVolumeSource) ProtoMessage() {} +func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{37} +} +func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIVolumeSource.Merge(m, src) +} +func (m *DownwardAPIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIVolumeSource.DiscardUnknown(m) +} -func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } -func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo -func (m *NodeList) Reset() { *m = NodeList{} } -func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (*EmptyDirVolumeSource) ProtoMessage() {} +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{38} +} +func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmptyDirVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EmptyDirVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmptyDirVolumeSource.Merge(m, src) +} +func (m *EmptyDirVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *EmptyDirVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_EmptyDirVolumeSource.DiscardUnknown(m) +} -func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } -func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo 
-func (m *NodeResources) Reset() { *m = NodeResources{} } -func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (*EndpointAddress) ProtoMessage() {} +func (*EndpointAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{39} +} +func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointAddress.Merge(m, src) +} +func (m *EndpointAddress) XXX_Size() int { + return m.Size() +} +func (m *EndpointAddress) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointAddress.DiscardUnknown(m) +} -func (m *NodeSelector) Reset() { *m = NodeSelector{} } -func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo -func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } -func (*NodeSelectorRequirement) ProtoMessage() {} -func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{92} +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{40} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) } -func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } -func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } - -func (m *NodeSpec) Reset() { *m = NodeSpec{} } -func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo -func (m *NodeStatus) Reset() { *m = NodeStatus{} } -func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (*EndpointSubset) ProtoMessage() {} +func (*EndpointSubset) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{41} +} +func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSubset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSubset) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSubset.Merge(m, src) +} +func (m *EndpointSubset) XXX_Size() int { + return m.Size() +} +func (m 
*EndpointSubset) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSubset.DiscardUnknown(m) +} -func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } -func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo -func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } -func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (*Endpoints) ProtoMessage() {} +func (*Endpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{42} +} +func (m *Endpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoints.Merge(m, src) +} +func (m *Endpoints) XXX_Size() int { + return m.Size() +} +func (m *Endpoints) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoints.DiscardUnknown(m) +} -func (m *ObjectReference) Reset() { *m = ObjectReference{} } -func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +var xxx_messageInfo_Endpoints proto.InternalMessageInfo -func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } -func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (*EndpointsList) ProtoMessage() {} +func (*EndpointsList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{43} +} +func (m *EndpointsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointsList.Merge(m, src) +} +func (m *EndpointsList) XXX_Size() int { + return m.Size() +} +func (m *EndpointsList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointsList.DiscardUnknown(m) +} -func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } -func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +var xxx_messageInfo_EndpointsList proto.InternalMessageInfo -func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } -func (*PersistentVolumeClaimCondition) ProtoMessage() {} -func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{101} +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } +func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{44} } - -func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } -func (*PersistentVolumeClaimList) ProtoMessage() {} -func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{102} +func (m 
*EnvFromSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } -func (*PersistentVolumeClaimSpec) ProtoMessage() {} -func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} +func (m *EnvFromSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - -func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } -func (*PersistentVolumeClaimStatus) ProtoMessage() {} -func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{104} +func (m *EnvFromSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvFromSource.Merge(m, src) } - -func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } -func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} -func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} +func (m *EnvFromSource) XXX_Size() int { + return m.Size() +} +func (m *EnvFromSource) XXX_DiscardUnknown() { + xxx_messageInfo_EnvFromSource.DiscardUnknown(m) } -func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } -func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } +var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo -func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } -func (*PersistentVolumeSource) ProtoMessage() {} -func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{107} +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (*EnvVar) ProtoMessage() {} +func (*EnvVar) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{45} +} +func (m *EnvVar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvVar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvVar) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvVar.Merge(m, src) +} +func (m *EnvVar) XXX_Size() int { + return m.Size() +} +func (m *EnvVar) XXX_DiscardUnknown() { + xxx_messageInfo_EnvVar.DiscardUnknown(m) } -func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } -func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +var xxx_messageInfo_EnvVar proto.InternalMessageInfo -func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } -func (*PersistentVolumeStatus) ProtoMessage() {} -func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{109} +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (*EnvVarSource) ProtoMessage() {} +func (*EnvVarSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{46} } - -func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } -func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} -func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{110} +func (m *EnvVarSource) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvVarSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvVarSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvVarSource.Merge(m, src) +} +func (m *EnvVarSource) XXX_Size() int { + return m.Size() +} +func (m *EnvVarSource) XXX_DiscardUnknown() { + xxx_messageInfo_EnvVarSource.DiscardUnknown(m) } -func (m *Pod) Reset() { *m = Pod{} } -func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo -func (m *PodAffinity) Reset() { *m = PodAffinity{} } -func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } +func (*EphemeralContainer) ProtoMessage() {} +func (*EphemeralContainer) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{47} +} +func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainer.Merge(m, src) +} +func (m *EphemeralContainer) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainer) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainer.DiscardUnknown(m) +} -func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } -func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo -func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } -func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } +func (*EphemeralContainerCommon) ProtoMessage() {} +func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{48} +} +func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainerCommon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainerCommon) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainerCommon.Merge(m, src) +} +func (m *EphemeralContainerCommon) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainerCommon) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainerCommon.DiscardUnknown(m) +} -func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } -func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo -func (m *PodCondition) Reset() { *m = PodCondition{} } -func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{116} } +func (m *EphemeralContainers) Reset() { *m = EphemeralContainers{} } +func (*EphemeralContainers) ProtoMessage() {} +func (*EphemeralContainers) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{49} +} +func (m *EphemeralContainers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainers) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainers.Merge(m, src) +} +func (m *EphemeralContainers) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainers) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainers.DiscardUnknown(m) +} -func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } -func (*PodDNSConfig) ProtoMessage() {} -func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +var xxx_messageInfo_EphemeralContainers proto.InternalMessageInfo -func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } -func (*PodDNSConfigOption) ProtoMessage() {} -func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{50} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} -func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } -func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +var xxx_messageInfo_Event proto.InternalMessageInfo -func (m *PodList) Reset() { *m = PodList{} } -func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{51} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} -func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } -func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +var xxx_messageInfo_EventList proto.InternalMessageInfo -func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } -func (*PodPortForwardOptions) ProtoMessage() {} 
-func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{52} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} -func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } -func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +var xxx_messageInfo_EventSeries proto.InternalMessageInfo -func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } -func (*PodReadinessGate) ProtoMessage() {} -func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (m *EventSource) Reset() { *m = EventSource{} } +func (*EventSource) ProtoMessage() {} +func (*EventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{53} +} +func (m *EventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSource.Merge(m, src) +} +func (m *EventSource) XXX_Size() int { + return m.Size() +} +func (m *EventSource) XXX_DiscardUnknown() { + xxx_messageInfo_EventSource.DiscardUnknown(m) +} -func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } -func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +var xxx_messageInfo_EventSource proto.InternalMessageInfo -func (m *PodSignature) Reset() { *m = PodSignature{} } -func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (m *ExecAction) Reset() { *m = ExecAction{} } +func (*ExecAction) ProtoMessage() {} +func (*ExecAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{54} +} +func (m *ExecAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExecAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecAction.Merge(m, src) +} +func (m *ExecAction) XXX_Size() int { + return m.Size() +} +func (m *ExecAction) XXX_DiscardUnknown() { + xxx_messageInfo_ExecAction.DiscardUnknown(m) +} -func (m *PodSpec) Reset() { *m = PodSpec{} } -func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +var xxx_messageInfo_ExecAction proto.InternalMessageInfo -func (m *PodStatus) Reset() { *m = PodStatus{} } 
-func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (*FCVolumeSource) ProtoMessage() {} +func (*FCVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{55} +} +func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FCVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FCVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FCVolumeSource.Merge(m, src) +} +func (m *FCVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FCVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FCVolumeSource.DiscardUnknown(m) +} -func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } -func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } +var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo -func (m *PodTemplate) Reset() { *m = PodTemplate{} } -func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } +func (*FlexPersistentVolumeSource) ProtoMessage() {} +func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{56} +} +func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlexPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlexPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlexPersistentVolumeSource.Merge(m, src) +} +func (m *FlexPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlexPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlexPersistentVolumeSource.DiscardUnknown(m) +} -func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } -func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo -func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } -func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (*FlexVolumeSource) ProtoMessage() {} +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{57} +} +func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlexVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlexVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlexVolumeSource.Merge(m, src) +} +func (m *FlexVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlexVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlexVolumeSource.DiscardUnknown(m) +} 
-func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } -func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func (*FlockerVolumeSource) ProtoMessage() {} +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{58} +} +func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlockerVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlockerVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlockerVolumeSource.Merge(m, src) +} +func (m *FlockerVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlockerVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlockerVolumeSource.DiscardUnknown(m) +} -func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } -func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo -func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } -func (*PreferredSchedulingTerm) ProtoMessage() {} -func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{136} +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} +func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{59} +} +func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GCEPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GCEPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GCEPersistentDiskVolumeSource.Merge(m, src) +} +func (m *GCEPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GCEPersistentDiskVolumeSource.DiscardUnknown(m) } -func (m *Probe) Reset() { *m = Probe{} } -func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo -func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } -func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (*GitRepoVolumeSource) ProtoMessage() {} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{60} +} +func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error 
{ + return m.Unmarshal(b) +} +func (m *GitRepoVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitRepoVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitRepoVolumeSource.Merge(m, src) +} +func (m *GitRepoVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GitRepoVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GitRepoVolumeSource.DiscardUnknown(m) +} -func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } -func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo -func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } -func (*RBDPersistentVolumeSource) ProtoMessage() {} -func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{140} +func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } +func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} +func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{61} +} +func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GlusterfsPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GlusterfsPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GlusterfsPersistentVolumeSource.Merge(m, src) +} +func (m *GlusterfsPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GlusterfsPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GlusterfsPersistentVolumeSource.DiscardUnknown(m) } -func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } -func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo -func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } -func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (*GlusterfsVolumeSource) ProtoMessage() {} +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{62} +} +func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GlusterfsVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GlusterfsVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GlusterfsVolumeSource.Merge(m, src) +} +func (m *GlusterfsVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GlusterfsVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GlusterfsVolumeSource.DiscardUnknown(m) +} -func (m *ReplicationController) Reset() { *m = ReplicationController{} } -func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{143} } +var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo -func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } -func (*ReplicationControllerCondition) ProtoMessage() {} -func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{144} +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (*HTTPGetAction) ProtoMessage() {} +func (*HTTPGetAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{63} +} +func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPGetAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPGetAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPGetAction.Merge(m, src) +} +func (m *HTTPGetAction) XXX_Size() int { + return m.Size() +} +func (m *HTTPGetAction) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPGetAction.DiscardUnknown(m) } -func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } -func (*ReplicationControllerList) ProtoMessage() {} -func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{145} +var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo + +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (*HTTPHeader) ProtoMessage() {} +func (*HTTPHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{64} +} +func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPHeader.Merge(m, src) +} +func (m *HTTPHeader) XXX_Size() int { + return m.Size() +} +func (m *HTTPHeader) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPHeader.DiscardUnknown(m) } -func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } -func (*ReplicationControllerSpec) ProtoMessage() {} -func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{146} +var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo + +func (m *Handler) Reset() { *m = Handler{} } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{65} +} +func (m *Handler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Handler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Handler) XXX_Merge(src proto.Message) { + xxx_messageInfo_Handler.Merge(m, src) +} +func (m *Handler) XXX_Size() int { + return m.Size() +} +func (m *Handler) XXX_DiscardUnknown() { + xxx_messageInfo_Handler.DiscardUnknown(m) } -func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } -func (*ReplicationControllerStatus) ProtoMessage() {} -func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{147} +var xxx_messageInfo_Handler proto.InternalMessageInfo + +func (m *HostAlias) Reset() { *m = HostAlias{} } 
+func (*HostAlias) ProtoMessage() {} +func (*HostAlias) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{66} +} +func (m *HostAlias) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostAlias) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostAlias.Merge(m, src) +} +func (m *HostAlias) XXX_Size() int { + return m.Size() +} +func (m *HostAlias) XXX_DiscardUnknown() { + xxx_messageInfo_HostAlias.DiscardUnknown(m) } -func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } -func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } +var xxx_messageInfo_HostAlias proto.InternalMessageInfo -func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } -func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (*HostPathVolumeSource) ProtoMessage() {} +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{67} +} +func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPathVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPathVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPathVolumeSource.Merge(m, src) +} +func (m *HostPathVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *HostPathVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_HostPathVolumeSource.DiscardUnknown(m) +} -func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } -func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo -func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } -func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{68} +} +func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ISCSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ISCSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ISCSIPersistentVolumeSource.Merge(m, src) +} +func (m *ISCSIPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ISCSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ISCSIPersistentVolumeSource.DiscardUnknown(m) +} -func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } -func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo -func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } -func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } +func (*ISCSIVolumeSource) ProtoMessage() {} +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{69} +} +func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ISCSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ISCSIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ISCSIVolumeSource.Merge(m, src) +} +func (m *ISCSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ISCSIVolumeSource.DiscardUnknown(m) +} -func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } -func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo -func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } -func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} -func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{155} +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (*KeyToPath) ProtoMessage() {} +func (*KeyToPath) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{70} +} +func (m *KeyToPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyToPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KeyToPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyToPath.Merge(m, src) +} +func (m *KeyToPath) XXX_Size() int { + return m.Size() } +func (m *KeyToPath) XXX_DiscardUnknown() { + xxx_messageInfo_KeyToPath.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyToPath proto.InternalMessageInfo -func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } -func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (*Lifecycle) ProtoMessage() {} +func (*Lifecycle) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{71} +} +func (m *Lifecycle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lifecycle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lifecycle) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lifecycle.Merge(m, src) +} +func (m *Lifecycle) XXX_Size() int { + return m.Size() +} +func (m *Lifecycle) XXX_DiscardUnknown() { + xxx_messageInfo_Lifecycle.DiscardUnknown(m) +} -func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } -func (*ScopeSelector) ProtoMessage() {} -func 
(*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +var xxx_messageInfo_Lifecycle proto.InternalMessageInfo -func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } -func (*ScopedResourceSelectorRequirement) ProtoMessage() {} -func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{158} +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (*LimitRange) ProtoMessage() {} +func (*LimitRange) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{72} +} +func (m *LimitRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRange.Merge(m, src) +} +func (m *LimitRange) XXX_Size() int { + return m.Size() +} +func (m *LimitRange) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRange.DiscardUnknown(m) } -func (m *Secret) Reset() { *m = Secret{} } -func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +var xxx_messageInfo_LimitRange proto.InternalMessageInfo + +func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } +func (*LimitRangeItem) ProtoMessage() {} +func (*LimitRangeItem) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{73} +} +func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeItem.Merge(m, src) +} +func (m *LimitRangeItem) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeItem) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeItem.DiscardUnknown(m) +} -func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } -func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo -func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } -func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } +func (*LimitRangeList) ProtoMessage() {} +func (*LimitRangeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{74} +} +func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeList.Merge(m, src) +} +func (m *LimitRangeList) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeList) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeList.DiscardUnknown(m) +} -func (m *SecretList) Reset() { *m = SecretList{} } -func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{162} } +var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo -func (m *SecretProjection) Reset() { *m = SecretProjection{} } -func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } +func (*LimitRangeSpec) ProtoMessage() {} +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{75} +} +func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeSpec.Merge(m, src) +} +func (m *LimitRangeSpec) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeSpec.DiscardUnknown(m) +} -func (m *SecretReference) Reset() { *m = SecretReference{} } -func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo -func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } -func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{76} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} -func (m *SecurityContext) Reset() { *m = SecurityContext{} } -func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +var xxx_messageInfo_List proto.InternalMessageInfo -func (m *SerializedReference) Reset() { *m = SerializedReference{} } -func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (*LoadBalancerIngress) ProtoMessage() {} +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{77} +} +func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerIngress.Merge(m, src) +} +func (m *LoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerIngress) XXX_DiscardUnknown() { + 
xxx_messageInfo_LoadBalancerIngress.DiscardUnknown(m) +} -func (m *Service) Reset() { *m = Service{} } -func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo -func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } -func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (*LoadBalancerStatus) ProtoMessage() {} +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{78} +} +func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerStatus.Merge(m, src) +} +func (m *LoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerStatus.DiscardUnknown(m) +} -func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } -func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo -func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } -func (*ServiceAccountTokenProjection) ProtoMessage() {} -func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{171} +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{79} +} +func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalObjectReference.Merge(m, src) +} +func (m *LocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *LocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_LocalObjectReference.DiscardUnknown(m) } -func (m *ServiceList) Reset() { *m = ServiceList{} } -func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo -func (m *ServicePort) Reset() { *m = ServicePort{} } -func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } +func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } +func (*LocalVolumeSource) ProtoMessage() {} +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{80} +} +func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 
+ b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalVolumeSource.Merge(m, src) +} +func (m *LocalVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *LocalVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_LocalVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo -func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } -func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (*NFSVolumeSource) ProtoMessage() {} +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{81} +} +func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NFSVolumeSource.Merge(m, src) +} +func (m *NFSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *NFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_NFSVolumeSource.DiscardUnknown(m) +} -func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } -func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo -func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } -func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{82} +} +func (m *Namespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Namespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Namespace.Merge(m, src) +} +func (m *Namespace) XXX_Size() int { + return m.Size() +} +func (m *Namespace) XXX_DiscardUnknown() { + xxx_messageInfo_Namespace.DiscardUnknown(m) +} -func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } -func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +var xxx_messageInfo_Namespace proto.InternalMessageInfo -func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } -func (*StorageOSPersistentVolumeSource) ProtoMessage() {} -func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{178} +func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } +func (*NamespaceCondition) ProtoMessage() {} +func (*NamespaceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{83} +} +func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*NamespaceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceCondition.Merge(m, src) +} +func (m *NamespaceCondition) XXX_Size() int { + return m.Size() +} +func (m *NamespaceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceCondition.DiscardUnknown(m) } -func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } -func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo -func (m *Sysctl) Reset() { *m = Sysctl{} } -func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{84} +} +func (m *NamespaceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceList.Merge(m, src) +} +func (m *NamespaceList) XXX_Size() int { + return m.Size() +} +func (m *NamespaceList) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceList.DiscardUnknown(m) +} -func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } -func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +var xxx_messageInfo_NamespaceList proto.InternalMessageInfo -func (m *Taint) Reset() { *m = Taint{} } -func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{85} +} +func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceSpec.Merge(m, src) +} +func (m *NamespaceSpec) XXX_Size() int { + return m.Size() +} +func (m *NamespaceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceSpec.DiscardUnknown(m) +} -func (m *Toleration) Reset() { *m = Toleration{} } -func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo -func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } -func (*TopologySelectorLabelRequirement) ProtoMessage() {} -func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{184} +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func 
(*NamespaceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{86} +} +func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceStatus.Merge(m, src) +} +func (m *NamespaceStatus) XXX_Size() int { + return m.Size() +} +func (m *NamespaceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceStatus.DiscardUnknown(m) } -func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } -func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } +var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo -func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } -func (*TypedLocalObjectReference) ProtoMessage() {} -func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{186} +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{87} +} +func (m *Node) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return m.Size() +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo -func (m *Volume) Reset() { *m = Volume{} } -func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{88} +} +func (m *NodeAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAddress.Merge(m, src) +} +func (m *NodeAddress) XXX_Size() int { + return m.Size() +} +func (m *NodeAddress) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAddress.DiscardUnknown(m) +} -func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } -func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } +var xxx_messageInfo_NodeAddress proto.InternalMessageInfo -func (m *VolumeMount) Reset() { *m = VolumeMount{} } -func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{89} +} +func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *NodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAffinity.Merge(m, src) +} +func (m *NodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *NodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAffinity.DiscardUnknown(m) +} -func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } -func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } +var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo -func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } -func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } +func (m *NodeCondition) Reset() { *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{90} +} +func (m *NodeCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeCondition.Merge(m, src) +} +func (m *NodeCondition) XXX_Size() int { + return m.Size() +} +func (m *NodeCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NodeCondition.DiscardUnknown(m) +} -func (m *VolumeSource) Reset() { *m = VolumeSource{} } -func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } +var xxx_messageInfo_NodeCondition proto.InternalMessageInfo -func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } -func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} -func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{193} +func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } +func (*NodeConfigSource) ProtoMessage() {} +func (*NodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{91} +} +func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigSource.Merge(m, src) +} +func (m *NodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigSource.DiscardUnknown(m) } -func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } -func (*WeightedPodAffinityTerm) ProtoMessage() {} -func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{194} +var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo + +func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } +func (*NodeConfigStatus) ProtoMessage() {} +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{92} +} +func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigStatus.Merge(m, src) +} +func (m *NodeConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigStatus.DiscardUnknown(m) } -func init() { - proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") - proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") - proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") - proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") - proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") - proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") - proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") - proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") - proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") - proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") - proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") - proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") - proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") - proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") - proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") - proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") - proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") - proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") - proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") - proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") - proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") - proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") - proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") - proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") - proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") - proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") - proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") - proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") - proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") - proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") - proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") - proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") - 
proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") - proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") - proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") - proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") - proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") - proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") - proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") - proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") - proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") - proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") - proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") - proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") - proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") - proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") - proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") - proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") - proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") - proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") - proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") - proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") - proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") - proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") - proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") - proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") - proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") - proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") - proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") - proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") - proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") - proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") - proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") - proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") - proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") - proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") - proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") - proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") - proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") - proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") - proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") - proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") - proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") - proto.RegisterType((*LocalObjectReference)(nil), 
"k8s.io.api.core.v1.LocalObjectReference") - proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") - proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") - proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") - proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") - proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") - proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") - proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") - proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") - proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") - proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") - proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") - proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") - proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") - proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") - proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") - proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") - proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") - proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") - proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") - proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") - proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") - proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") - proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") - proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") - proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") - proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") - proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") - proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") - proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") - proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") - proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") - proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") - proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") - proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") - proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") - proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") - proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") - proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") - proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") - proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") - 
proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") - proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") - proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") - proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") - proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") - proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") - proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") - proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") - proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") - proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") - proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") - proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") - proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") - proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") - proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") - proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") - proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") - proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") - proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") - proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") - proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") - proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") - proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") - proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") - proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") - proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") - proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") - proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") - proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") - proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") - proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") - proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") - proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") - proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") - proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") - proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") - proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") - proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") - proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") - proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") - 
proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") - proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") - proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") - proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") - proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") - proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") - proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") - proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") - proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") - proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") - proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") - proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") - proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") - proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") - proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") - proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") - proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") - proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") - proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") - proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") - proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") - proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") - proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") - proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") - proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") - proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") - proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") - proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") - proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") - proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") - proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") - proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") - proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") - proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") - proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") - proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") - proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") - proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") - proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") - proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") +var xxx_messageInfo_NodeConfigStatus 
proto.InternalMessageInfo + +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{93} } -func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeDaemonEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *NodeDaemonEndpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeDaemonEndpoints.Merge(m, src) +} +func (m *NodeDaemonEndpoints) XXX_Size() int { + return m.Size() +} +func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { + xxx_messageInfo_NodeDaemonEndpoints.DiscardUnknown(m) } -func (m *Affinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo + +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{94} +} +func (m *NodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeList.Merge(m, src) +} +func (m *NodeList) XXX_Size() int { + return m.Size() +} +func (m *NodeList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeList.DiscardUnknown(m) } -func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.NodeAffinity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n1, err := m.NodeAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PodAffinity != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinity.Size())) - n2, err := m.PodAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.PodAntiAffinity != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAntiAffinity.Size())) - n3, err := m.PodAntiAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 +var xxx_messageInfo_NodeList proto.InternalMessageInfo + +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{95} +} +func (m 
*NodeProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeProxyOptions.Merge(m, src) +} +func (m *NodeProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *NodeProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_NodeProxyOptions.DiscardUnknown(m) } -func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo + +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{96} +} +func (m *NodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i += copy(dAtA[i:], m.DevicePath) - return i, nil +func (m *NodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeResources.Merge(m, src) +} +func (m *NodeResources) XXX_Size() int { + return m.Size() +} +func (m *NodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_NodeResources.DiscardUnknown(m) } -func (m *AvoidPods) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeResources proto.InternalMessageInfo + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func (*NodeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{97} +} +func (m *NodeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelector.Merge(m, src) +} +func (m *NodeSelector) XXX_Size() int { + return m.Size() +} +func (m *NodeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelector.DiscardUnknown(m) } -func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, msg := range m.PreferAvoidPods { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_NodeSelector proto.InternalMessageInfo + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{98} +} +func (m 
*NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorRequirement.Merge(m, src) +} +func (m *NodeSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *NodeSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorRequirement.DiscardUnknown(m) } -func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{99} +} +func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorTerm.Merge(m, src) +} +func (m *NodeSelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *NodeSelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorTerm.DiscardUnknown(m) } -func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) - i += copy(dAtA[i:], m.DiskName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) - i += copy(dAtA[i:], m.DataDiskURI) - if m.CachingMode != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) - i += copy(dAtA[i:], *m.CachingMode) - } - if m.FSType != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) - } - if m.ReadOnly != nil { - dAtA[i] = 0x28 - i++ - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Kind != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) +var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{100} +} +func (m *NodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSpec.Merge(m, src) +} +func (m *NodeSpec) XXX_Size() int { + return m.Size() +} +func (m *NodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSpec.DiscardUnknown(m) } -func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSpec 
proto.InternalMessageInfo + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{101} +} +func (m *NodeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStatus.Merge(m, src) +} +func (m *NodeStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStatus.DiscardUnknown(m) } -func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i += copy(dAtA[i:], m.ShareName) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretNamespace != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) - i += copy(dAtA[i:], *m.SecretNamespace) +var xxx_messageInfo_NodeStatus proto.InternalMessageInfo + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{102} +} +func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSystemInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeSystemInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSystemInfo.Merge(m, src) +} +func (m *NodeSystemInfo) XXX_Size() int { + return m.Size() +} +func (m *NodeSystemInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSystemInfo.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{103} +} +func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ObjectFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectFieldSelector.Merge(m, src) +} +func (m *ObjectFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ObjectFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectFieldSelector.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - dAtA[i] = 0x12 
- i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i += copy(dAtA[i:], m.ShareName) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{104} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) } -func (m *Binding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{105} +} +func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolume.Merge(m, src) +} +func (m *PersistentVolume) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolume) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolume.DiscardUnknown(m) } -func (m *Binding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{106} +} +func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n5, err := m.Target.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *PersistentVolumeClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaim.Merge(m, src) +} +func (m *PersistentVolumeClaim) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaim) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo + +func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } +func 
(*PersistentVolumeClaimCondition) ProtoMessage() {} +func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{107} +} +func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n5 - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimCondition.Merge(m, src) +} +func (m *PersistentVolumeClaimCondition) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimCondition.DiscardUnknown(m) } -func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{108} +} +func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimList.Merge(m, src) +} +func (m *PersistentVolumeClaimList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimList.DiscardUnknown(m) } -func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) - i += copy(dAtA[i:], m.VolumeHandle) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for _, k := range keysForVolumeAttributes { - dAtA[i] = 0x2a - i++ - v := m.VolumeAttributes[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ControllerPublishSecretRef != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerPublishSecretRef.Size())) - n6, err := 
m.ControllerPublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.NodeStageSecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeStageSecretRef.Size())) - n7, err := m.NodeStageSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.NodePublishSecretRef != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) - n8, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 +var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{109} +} +func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimSpec.Merge(m, src) +} +func (m *PersistentVolumeClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimSpec.DiscardUnknown(m) } -func (m *Capabilities) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{110} +} +func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimStatus.Merge(m, src) +} +func (m *PersistentVolumeClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimStatus.DiscardUnknown(m) } -func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } +var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() 
([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{111} +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.DiscardUnknown(m) } -func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{112} +} +func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i += copy(dAtA[i:], m.SecretFile) - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n9, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *PersistentVolumeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeList.Merge(m, src) +} +func (m *PersistentVolumeList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeList.DiscardUnknown(m) } -func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{113} +} +func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i += copy(dAtA[i:], m.SecretFile) - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n10, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *PersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSource.DiscardUnknown(m) } -func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo + +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{114} +} +func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSpec.Merge(m, src) +} +func (m *PersistentVolumeSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSpec.DiscardUnknown(m) } -func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n11, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 +var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{115} +} +func (m *PersistentVolumeStatus) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeStatus.Merge(m, src) +} +func (m *PersistentVolumeStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeStatus.DiscardUnknown(m) } -func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo + +func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{116} +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.Merge(m, src) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PhotonPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.DiscardUnknown(m) } -func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n12, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 +var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo + +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{117} +} +func (m *Pod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Pod) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pod.Merge(m, src) +} +func (m *Pod) XXX_Size() int { + return m.Size() +} +func (m *Pod) XXX_DiscardUnknown() { + xxx_messageInfo_Pod.DiscardUnknown(m) } -func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Pod proto.InternalMessageInfo + +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{118} +} +func (m *PodAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinity.Merge(m, src) +} +func (m *PodAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinity.DiscardUnknown(m) } -func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TimeoutSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) +var xxx_messageInfo_PodAffinity proto.InternalMessageInfo + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{119} +} +func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinityTerm.Merge(m, src) +} +func (m *PodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *PodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinityTerm.DiscardUnknown(m) } -func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{120} +} +func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil +func (m *PodAntiAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAntiAffinity.Merge(m, src) +} +func (m *PodAntiAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAntiAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAntiAffinity.DiscardUnknown(m) } -func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func 
(*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{121} +} +func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAttachOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodAttachOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAttachOptions.Merge(m, src) +} +func (m *PodAttachOptions) XXX_Size() int { + return m.Size() +} +func (m *PodAttachOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodAttachOptions.DiscardUnknown(m) } -func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{122} +} +func (m *PodCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n13 - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCondition.Merge(m, src) +} +func (m *PodCondition) XXX_Size() int { + return m.Size() +} +func (m *PodCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PodCondition.DiscardUnknown(m) } -func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodCondition proto.InternalMessageInfo + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{123} +} +func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodDNSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfig.Merge(m, src) +} +func (m *PodDNSConfig) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfig.DiscardUnknown(m) } -func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n14, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{124} +} +func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfigOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n14 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodDNSConfigOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfigOption.Merge(m, src) +} +func (m *PodDNSConfigOption) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfigOption) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfigOption.DiscardUnknown(m) } -func (m *ConfigMap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{125} +} +func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodExecOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodExecOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodExecOptions.Merge(m, src) +} +func (m *PodExecOptions) XXX_Size() int { + return m.Size() +} +func (m *PodExecOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodExecOptions.DiscardUnknown(m) } -func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n15, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo + +func (m *PodIP) Reset() { *m = PodIP{} } +func (*PodIP) ProtoMessage() {} +func (*PodIP) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{126} +} +func (m *PodIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n15 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.BinaryData) > 0 { - keysForBinaryData := make([]string, 0, len(m.BinaryData)) - for k := range m.BinaryData { - keysForBinaryData = append(keysForBinaryData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - for _, k := range 
keysForBinaryData { - dAtA[i] = 0x1a - i++ - v := m.BinaryData[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodIP.Merge(m, src) +} +func (m *PodIP) XXX_Size() int { + return m.Size() +} +func (m *PodIP) XXX_DiscardUnknown() { + xxx_messageInfo_PodIP.DiscardUnknown(m) } -func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodIP proto.InternalMessageInfo + +func (m *PodList) Reset() { *m = PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{127} +} +func (m *PodList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodList.Merge(m, src) +} +func (m *PodList) XXX_Size() int { + return m.Size() +} +func (m *PodList) XXX_DiscardUnknown() { + xxx_messageInfo_PodList.DiscardUnknown(m) } -func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n16, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodList proto.InternalMessageInfo + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{128} +} +func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n16 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodLogOptions.Merge(m, src) +} +func (m *PodLogOptions) XXX_Size() int { + return m.Size() +} +func (m *PodLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodLogOptions.DiscardUnknown(m) } -func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{129} +} +func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPortForwardOptions) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodPortForwardOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPortForwardOptions.Merge(m, src) +} +func (m *PodPortForwardOptions) XXX_Size() int { + return m.Size() +} +func (m *PodPortForwardOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodPortForwardOptions.DiscardUnknown(m) } -func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{130} +} +func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n17 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodProxyOptions.Merge(m, src) +} +func (m *PodProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *PodProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodProxyOptions.DiscardUnknown(m) } -func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo + +func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } +func (*PodReadinessGate) ProtoMessage() {} +func (*PodReadinessGate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{131} +} +func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodReadinessGate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodReadinessGate.Merge(m, src) +} +func (m *PodReadinessGate) XXX_Size() int { + return m.Size() +} +func (m *PodReadinessGate) XXX_DiscardUnknown() { + xxx_messageInfo_PodReadinessGate.DiscardUnknown(m) } -func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{132} +} +func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityContext) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n18 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodSecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityContext.Merge(m, src) +} +func (m *PodSecurityContext) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityContext.DiscardUnknown(m) } -func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo + +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{133} +} +func (m *PodSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) - i += copy(dAtA[i:], m.KubeletConfigKey) - return i, nil +func (m *PodSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSignature.Merge(m, src) +} +func (m *PodSignature) XXX_Size() int { + return m.Size() +} +func (m *PodSignature) XXX_DiscardUnknown() { + xxx_messageInfo_PodSignature.DiscardUnknown(m) } -func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodSignature proto.InternalMessageInfo + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{134} +} +func (m *PodSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSpec.Merge(m, src) +} +func (m *PodSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSpec.DiscardUnknown(m) } -func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.LocalObjectReference.Size())) - n19, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodSpec proto.InternalMessageInfo + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{135} +} +func (m *PodStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n19 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatus.Merge(m, src) +} +func (m *PodStatus) XXX_Size() int { + return m.Size() +} +func (m *PodStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatus.DiscardUnknown(m) } -func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodStatus proto.InternalMessageInfo + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{136} +} +func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatusResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodStatusResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatusResult.Merge(m, src) +} +func (m *PodStatusResult) XXX_Size() int { + return m.Size() +} +func (m *PodStatusResult) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatusResult.DiscardUnknown(m) } -func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n20, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{137} +} +func (m *PodTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n20 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return 
i, nil + return b[:n], nil +} +func (m *PodTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplate.Merge(m, src) +} +func (m *PodTemplate) XXX_Size() int { + return m.Size() +} +func (m *PodTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplate.DiscardUnknown(m) } -func (m *Container) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodTemplate proto.InternalMessageInfo + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{138} +} +func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateList.Merge(m, src) +} +func (m *PodTemplateList) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateList.DiscardUnknown(m) } -func (m *Container) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) - i += copy(dAtA[i:], m.WorkingDir) - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n21, err := m.Resources.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo + +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{139} +} +func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n21 - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - 
} - i += n - } - } - if m.LivenessProbe != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) - n22, err := m.LivenessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - if m.ReadinessProbe != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) - n23, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.Lifecycle != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) - n24, err := m.Lifecycle.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) - i += copy(dAtA[i:], m.TerminationMessagePath) - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) - i += copy(dAtA[i:], m.ImagePullPolicy) - if m.SecurityContext != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n25, err := m.SecurityContext.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - if m.StdinOnce { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x1 - i++ - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) - i += copy(dAtA[i:], m.TerminationMessagePolicy) - if len(m.VolumeDevices) > 0 { - for _, msg := range m.VolumeDevices { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateSpec.Merge(m, src) +} +func (m *PodTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateSpec.DiscardUnknown(m) } -func (m *ContainerImage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{140} +} +func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortworxVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f 
| 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) - return i, nil +func (m *PortworxVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortworxVolumeSource.Merge(m, src) +} +func (m *PortworxVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PortworxVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PortworxVolumeSource.DiscardUnknown(m) } -func (m *ContainerPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{141} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - return i, nil +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) } -func (m *ContainerState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Preconditions proto.InternalMessageInfo + +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{142} +} +func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferAvoidPodsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PreferAvoidPodsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferAvoidPodsEntry.Merge(m, src) +} +func (m *PreferAvoidPodsEntry) XXX_Size() int { + return m.Size() +} +func (m *PreferAvoidPodsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_PreferAvoidPodsEntry.DiscardUnknown(m) } -func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Waiting != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) - n26, err := m.Waiting.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - if m.Running != nil { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) - n27, err := m.Running.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - } - if m.Terminated != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) - n28, err := m.Terminated.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 +var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func (*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{143} +} +func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferredSchedulingTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PreferredSchedulingTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferredSchedulingTerm.Merge(m, src) +} +func (m *PreferredSchedulingTerm) XXX_Size() int { + return m.Size() +} +func (m *PreferredSchedulingTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PreferredSchedulingTerm.DiscardUnknown(m) } -func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo + +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{144} +} +func (m *Probe) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Probe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Probe) XXX_Merge(src proto.Message) { + xxx_messageInfo_Probe.Merge(m, src) +} +func (m *Probe) XXX_Size() int { + return m.Size() +} +func (m *Probe) XXX_DiscardUnknown() { + xxx_messageInfo_Probe.DiscardUnknown(m) } -func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n29, err := m.StartedAt.MarshalTo(dAtA[i:]) +var xxx_messageInfo_Probe proto.InternalMessageInfo + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{145} +} +func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectedVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n29 - return i, nil + return b[:n], nil +} +func (m *ProjectedVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectedVolumeSource.Merge(m, src) +} +func (m *ProjectedVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ProjectedVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectedVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, 
err := m.MarshalTo(dAtA) +var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo + +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{146} +} +func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuobyteVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *QuobyteVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuobyteVolumeSource.Merge(m, src) +} +func (m *QuobyteVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *QuobyteVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_QuobyteVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n30, err := m.StartedAt.MarshalTo(dAtA[i:]) +var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo + +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (*RBDPersistentVolumeSource) ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{147} +} +func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n30 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) - n31, err := m.FinishedAt.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *RBDPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDPersistentVolumeSource.Merge(m, src) +} +func (m *RBDPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo + +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{148} +} +func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n31 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil + return b[:n], nil +} +func (m *RBDVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDVolumeSource.Merge(m, 
src) +} +func (m *RBDVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{149} +} +func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil +func (m *RangeAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeAllocation.Merge(m, src) +} +func (m *RangeAllocation) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocation.DiscardUnknown(m) } -func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{150} +} +func (m *ReplicationController) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicationController) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationController.Merge(m, src) +} +func (m *ReplicationController) XXX_Size() int { + return m.Size() +} +func (m *ReplicationController) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationController.DiscardUnknown(m) } -func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) - n32, err := m.State.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ReplicationController proto.InternalMessageInfo + +func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{151} +} +func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n32 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) - n33, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ReplicationControllerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerCondition.Merge(m, src) +} +func (m *ReplicationControllerCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo + +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{152} +} +func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x20 - i++ - if m.Ready { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return nil, err } - i++ - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) - i += copy(dAtA[i:], m.ImageID) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil + return b[:n], nil +} +func (m *ReplicationControllerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerList.Merge(m, src) +} +func (m *ReplicationControllerList) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerList.DiscardUnknown(m) } -func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} +func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{153} +} +func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - return i, nil +func (m *ReplicationControllerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerSpec.Merge(m, src) +} +func (m *ReplicationControllerSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerSpec.DiscardUnknown(m) } -func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() {} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{154} +} +func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicationControllerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerStatus.Merge(m, src) +} +func (m *ReplicationControllerStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerStatus.DiscardUnknown(m) } -func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{155} +} +func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ResourceFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceFieldSelector.Merge(m, src) +} +func (m *ResourceFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ResourceFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceFieldSelector.DiscardUnknown(m) } -func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{156} +} +func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuota.Merge(m, src) +} +func (m *ResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuota.DiscardUnknown(m) } -func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n34, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n35, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - } - if m.Mode != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) +var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{157} +} +func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaList.Merge(m, src) +} +func (m *ResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaList.DiscardUnknown(m) } -func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{158} +} +func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceQuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaSpec.Merge(m, src) +} +func (m *ResourceQuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaSpec.DiscardUnknown(m) } -func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) +var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{159} +} +func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return 
i, nil + return b[:n], nil +} +func (m *ResourceQuotaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaStatus.Merge(m, src) +} +func (m *ResourceQuotaStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaStatus.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{160} +} +func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRequirements.Merge(m, src) +} +func (m *ResourceRequirements) XXX_Size() int { + return m.Size() +} +func (m *ResourceRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRequirements.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) - i += copy(dAtA[i:], m.Medium) - if m.SizeLimit != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n36, err := m.SizeLimit.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 +var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{161} +} +func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SELinuxOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxOptions.Merge(m, src) +} +func (m *SELinuxOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxOptions.DiscardUnknown(m) } -func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo + +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{162} +} +func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ScaleIOPersistentVolumeSource) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOPersistentVolumeSource.Merge(m, src) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOPersistentVolumeSource.DiscardUnknown(m) } -func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if m.TargetRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) - n37, err := m.TargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - if m.NodeName != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) - i += copy(dAtA[i:], *m.NodeName) - } - return i, nil -} +var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo -func (m *EndpointPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{163} +} +func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - return i, nil +func (m *ScaleIOVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOVolumeSource.Merge(m, src) +} +func (m *ScaleIOVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOVolumeSource.DiscardUnknown(m) } -func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo + +func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } +func (*ScopeSelector) ProtoMessage() {} +func (*ScopeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{164} +} +func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ScopeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeSelector.Merge(m, src) +} +func (m *ScopeSelector) XXX_Size() int { + return m.Size() +} +func (m *ScopeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeSelector.DiscardUnknown(m) } -func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, 
error) { - var i int - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.NotReadyAddresses) > 0 { - for _, msg := range m.NotReadyAddresses { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo + +func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } +func (*ScopedResourceSelectorRequirement) ProtoMessage() {} +func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{165} +} +func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopedResourceSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ScopedResourceSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopedResourceSelectorRequirement.Merge(m, src) +} +func (m *ScopedResourceSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *ScopedResourceSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_ScopedResourceSelectorRequirement.DiscardUnknown(m) } -func (m *Endpoints) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{166} +} +func (m *Secret) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Secret) XXX_Merge(src proto.Message) { + xxx_messageInfo_Secret.Merge(m, src) +} +func (m *Secret) XXX_Size() int { + return m.Size() +} +func (m *Secret) XXX_DiscardUnknown() { + xxx_messageInfo_Secret.DiscardUnknown(m) } -func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n38, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_Secret proto.InternalMessageInfo + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{167} +} +func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n38 - if len(m.Subsets) > 0 { - for 
_, msg := range m.Subsets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretEnvSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretEnvSource.Merge(m, src) +} +func (m *SecretEnvSource) XXX_Size() int { + return m.Size() +} +func (m *SecretEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretEnvSource.DiscardUnknown(m) } -func (m *EndpointsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{168} +} +func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretKeySelector.Merge(m, src) +} +func (m *SecretKeySelector) XXX_Size() int { + return m.Size() +} +func (m *SecretKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_SecretKeySelector.DiscardUnknown(m) } -func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n39, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{169} +} +func (m *SecretList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n39 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretList.Merge(m, src) +} +func (m *SecretList) XXX_Size() int { + return m.Size() +} +func (m *SecretList) XXX_DiscardUnknown() { + xxx_messageInfo_SecretList.DiscardUnknown(m) } -func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretList proto.InternalMessageInfo + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{170} +} +func (m *SecretProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return 
dAtA[:n], nil + return b[:n], nil +} +func (m *SecretProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretProjection.Merge(m, src) +} +func (m *SecretProjection) XXX_Size() int { + return m.Size() +} +func (m *SecretProjection) XXX_DiscardUnknown() { + xxx_messageInfo_SecretProjection.DiscardUnknown(m) } -func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) - i += copy(dAtA[i:], m.Prefix) - if m.ConfigMapRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n40, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - } - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n41, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 +var xxx_messageInfo_SecretProjection proto.InternalMessageInfo + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{171} +} +func (m *SecretReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretReference.Merge(m, src) +} +func (m *SecretReference) XXX_Size() int { + return m.Size() +} +func (m *SecretReference) XXX_DiscardUnknown() { + xxx_messageInfo_SecretReference.DiscardUnknown(m) } -func (m *EnvVar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretReference proto.InternalMessageInfo + +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{172} +} +func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretVolumeSource.Merge(m, src) +} +func (m *SecretVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *SecretVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretVolumeSource.DiscardUnknown(m) } -func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - if m.ValueFrom != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) - n42, err := m.ValueFrom.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 +var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) 
ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{173} +} +func (m *SecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContext.Merge(m, src) +} +func (m *SecurityContext) XXX_Size() int { + return m.Size() +} +func (m *SecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContext.DiscardUnknown(m) } -func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecurityContext proto.InternalMessageInfo + +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{174} +} +func (m *SerializedReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializedReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SerializedReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializedReference.Merge(m, src) +} +func (m *SerializedReference) XXX_Size() int { + return m.Size() +} +func (m *SerializedReference) XXX_DiscardUnknown() { + xxx_messageInfo_SerializedReference.DiscardUnknown(m) } -func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.FieldRef != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n43, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n44, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - } - if m.ConfigMapKeyRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) - n45, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - } - if m.SecretKeyRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) - n46, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 +var xxx_messageInfo_SerializedReference proto.InternalMessageInfo + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{175} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(m, src) +} +func (m *Service) XXX_Size() int { + return m.Size() +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) } -func (m *Event) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{176} +} +func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ServiceAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccount.Merge(m, src) +} +func (m *ServiceAccount) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccount) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccount.DiscardUnknown(m) } -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n47, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo + +func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{177} +} +func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n47 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) - n48, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccountList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountList.Merge(m, src) +} +func (m *ServiceAccountList) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo + +func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } +func (*ServiceAccountTokenProjection) ProtoMessage() {} +func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{178} +} +func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountTokenProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n48 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n49, err := m.Source.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccountTokenProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountTokenProjection.Merge(m, src) +} +func (m *ServiceAccountTokenProjection) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountTokenProjection) 
XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountTokenProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{179} +} +func (m *ServiceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n49 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) - n50, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceList.Merge(m, src) +} +func (m *ServiceList) XXX_Size() int { + return m.Size() +} +func (m *ServiceList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceList proto.InternalMessageInfo + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{180} +} +func (m *ServicePort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n50 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) - n51, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServicePort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServicePort.Merge(m, src) +} +func (m *ServicePort) XXX_Size() int { + return m.Size() +} +func (m *ServicePort) XXX_DiscardUnknown() { + xxx_messageInfo_ServicePort.DiscardUnknown(m) +} + +var xxx_messageInfo_ServicePort proto.InternalMessageInfo + +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{181} +} +func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n51 - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n52, err := m.EventTime.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceProxyOptions.Merge(m, src) +} +func (m *ServiceProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *ServiceProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceProxyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, 
[]int{182} +} +func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n52 - if m.Series != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n53, err := m.Series.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n53 - } - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - if m.Related != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n54, err := m.Related.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n54 + return nil, err } - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - return i, nil + return b[:n], nil +} +func (m *ServiceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceSpec.Merge(m, src) +} +func (m *ServiceSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceSpec.DiscardUnknown(m) } -func (m *EventList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo + +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{183} +} +func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ServiceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceStatus.Merge(m, src) +} +func (m *ServiceStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceStatus.DiscardUnknown(m) } -func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n55, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo + +func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } +func (*SessionAffinityConfig) ProtoMessage() {} +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{184} +} +func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n55 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_SessionAffinityConfig.Merge(m, src) +} +func (m *SessionAffinityConfig) XXX_Size() int { + return m.Size() +} +func (m *SessionAffinityConfig) XXX_DiscardUnknown() { + xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m) } -func (m *EventSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo + +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{185} +} +func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *StorageOSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSPersistentVolumeSource.Merge(m, src) +} +func (m *StorageOSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *StorageOSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSPersistentVolumeSource.DiscardUnknown(m) } -func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n56, err := m.LastObservedTime.MarshalTo(dAtA[i:]) +var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo + +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{186} +} +func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n56 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil + return b[:n], nil +} +func (m *StorageOSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSVolumeSource.Merge(m, src) +} +func (m *StorageOSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *StorageOSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSVolumeSource.DiscardUnknown(m) } -func (m *EventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{187} +} +func (m *Sysctl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sysctl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EventSource) 
MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) - i += copy(dAtA[i:], m.Component) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil +func (m *Sysctl) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sysctl.Merge(m, src) +} +func (m *Sysctl) XXX_Size() int { + return m.Size() +} +func (m *Sysctl) XXX_DiscardUnknown() { + xxx_messageInfo_Sysctl.DiscardUnknown(m) } -func (m *ExecAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Sysctl proto.InternalMessageInfo + +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{188} +} +func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TCPSocketAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *TCPSocketAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_TCPSocketAction.Merge(m, src) +} +func (m *TCPSocketAction) XXX_Size() int { + return m.Size() +} +func (m *TCPSocketAction) XXX_DiscardUnknown() { + xxx_messageInfo_TCPSocketAction.DiscardUnknown(m) } -func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } +var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo + +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{189} +} +func (m *Taint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Taint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Taint.Merge(m, src) +} +func (m *Taint) XXX_Size() int { + return m.Size() +} +func (m *Taint) XXX_DiscardUnknown() { + xxx_messageInfo_Taint.DiscardUnknown(m) } -func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Taint proto.InternalMessageInfo + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{190} +} +func (m *Toleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Toleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Toleration.Merge(m, src) +} +func (m *Toleration) XXX_Size() int { + return m.Size() +} +func (m *Toleration) XXX_DiscardUnknown() { + 
xxx_messageInfo_Toleration.DiscardUnknown(m) } -func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.Lun != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } +var xxx_messageInfo_Toleration proto.InternalMessageInfo + +func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } +func (*TopologySelectorLabelRequirement) ProtoMessage() {} +func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{191} +} +func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorLabelRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *TopologySelectorLabelRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorLabelRequirement.Merge(m, src) +} +func (m *TopologySelectorLabelRequirement) XXX_Size() int { + return m.Size() +} +func (m *TopologySelectorLabelRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorLabelRequirement.DiscardUnknown(m) } -func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo + +func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } +func (*TopologySelectorTerm) ProtoMessage() {} +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{192} +} +func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *TopologySelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorTerm.Merge(m, src) +} +func (m *TopologySelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *TopologySelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorTerm.DiscardUnknown(m) } -func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n57, err := 
m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n57 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo + +func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } +func (*TopologySpreadConstraint) ProtoMessage() {} +func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{193} +} +func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySpreadConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + return b[:n], nil +} +func (m *TopologySpreadConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySpreadConstraint.Merge(m, src) +} +func (m *TopologySpreadConstraint) XXX_Size() int { + return m.Size() +} +func (m *TopologySpreadConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySpreadConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo + +func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } +func (*TypedLocalObjectReference) ProtoMessage() {} +func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{194} +} +func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *TypedLocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedLocalObjectReference.Merge(m, src) +} +func (m *TypedLocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *TypedLocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m) } -func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{195} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, 
src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) } -func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n58, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n58 +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{196} +} +func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return b[:n], nil +} +func (m *VolumeDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeDevice.Merge(m, src) +} +func (m *VolumeDevice) XXX_Size() int { + return m.Size() +} +func (m *VolumeDevice) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{197} +} +func (m *VolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + return b[:n], nil +} +func (m *VolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMount.Merge(m, src) +} +func (m *VolumeMount) XXX_Size() int { + return m.Size() +} +func (m *VolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMount.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeMount proto.InternalMessageInfo + +func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } +func (*VolumeNodeAffinity) ProtoMessage() {} +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{198} +} +func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + 
n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *VolumeNodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeAffinity.Merge(m, src) +} +func (m *VolumeNodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeAffinity.DiscardUnknown(m) } -func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{199} +} +func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *VolumeProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeProjection.Merge(m, src) +} +func (m *VolumeProjection) XXX_Size() int { + return m.Size() +} +func (m *VolumeProjection) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeProjection.DiscardUnknown(m) } -func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) - i += copy(dAtA[i:], m.DatasetName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) - i += copy(dAtA[i:], m.DatasetUUID) - return i, nil +var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{200} +} +func (m *VolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeSource.Merge(m, src) +} +func (m *VolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeSource.DiscardUnknown(m) } -func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeSource proto.InternalMessageInfo + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{201} +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_VsphereVirtualDiskVolumeSource.Merge(m, src) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VsphereVirtualDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.DiscardUnknown(m) } -func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) - i += copy(dAtA[i:], m.PDName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{202} +} +func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WeightedPodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *WeightedPodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_WeightedPodAffinityTerm.Merge(m, src) +} +func (m *WeightedPodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *WeightedPodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_WeightedPodAffinityTerm.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo + +func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } +func (*WindowsSecurityContextOptions) ProtoMessage() {} +func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{203} +} +func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsSecurityContextOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *WindowsSecurityContextOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsSecurityContextOptions.Merge(m, src) +} +func (m *WindowsSecurityContextOptions) XXX_Size() int { + return m.Size() +} +func (m *WindowsSecurityContextOptions) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsSecurityContextOptions.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) - i += copy(dAtA[i:], m.Repository) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) - i += copy(dAtA[i:], m.Revision) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) - i += copy(dAtA[i:], m.Directory) - return i, nil +var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo + +func init() { + 
proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") + proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") + proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") + proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.ConfigMap.BinaryDataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ConfigMap.DataEntry") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") + proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") 
+ proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") + proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer") + proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon") + proto.RegisterType((*EphemeralContainers)(nil), "k8s.io.api.core.v1.EphemeralContainers") + proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") + proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") + 
proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultRequestEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxLimitRequestRatioEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") + proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") + proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") + proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") + proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeResources.CapacityEntry") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimCondition)(nil), 
"k8s.io.api.core.v1.PersistentVolumeClaimCondition") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec.CapacityEntry") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP") + proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") + proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.PodSpec.NodeSelectorEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PodSpec.OverheadEntry") + proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), 
"k8s.io.api.core.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec.HardEntry") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.HardEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.UsedEntry") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") + proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") + proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") + proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") + proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.Secret.DataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.Secret.StringDataEntry") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") + proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), 
"k8s.io.api.core.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") + proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") + proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") + proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") + proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") + proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint") + proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") + proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") + proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions") } -func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptor_83c10c24ec417dc9) +} + +var fileDescriptor_83c10c24ec417dc9 = []byte{ + // 13620 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x24, 0x59, + 0x5a, 0x18, 0xba, 0x59, 0xa5, 0x47, 0xd5, 0xa7, 0xf7, 0xe9, 0xc7, 0xa8, 0x35, 0xdd, 0xad, 0x9e, + 0x9c, 0xdd, 0x9e, 0x9e, 0x9d, 0x19, 0xf5, 0xce, 0x6b, 0x67, 0x98, 0x99, 0x1d, 0x90, 0x54, 0x52, + 0x77, 0x4d, 0xb7, 0xd4, 0x35, 0xa7, 0xd4, 0xdd, 0xbb, 0xc3, 0xec, 0xde, 0x4d, 0x55, 0x1e, 0x49, + 0x39, 0x2a, 0x65, 0xd6, 0x64, 0x66, 0x49, 0xad, 0xb9, 0x10, 
0x97, 0xbb, 0x3c, 0xf7, 0x02, 0x37, + 0x36, 0x6c, 0xc2, 0x0f, 0x20, 0xb0, 0x03, 0xe3, 0x00, 0x0c, 0x76, 0x18, 0x83, 0x01, 0xef, 0x62, + 0x1b, 0x83, 0xed, 0xc0, 0xfe, 0x81, 0xb1, 0xc3, 0xf6, 0x12, 0x41, 0x58, 0x86, 0xc6, 0x61, 0x62, + 0x7f, 0x18, 0x08, 0x83, 0x7f, 0x58, 0x26, 0x8c, 0xe3, 0x3c, 0xf3, 0x9c, 0xac, 0xcc, 0xaa, 0x52, + 0x8f, 0x5a, 0x3b, 0x6c, 0xcc, 0xbf, 0xaa, 0xf3, 0x7d, 0xe7, 0x3b, 0x27, 0xcf, 0xf3, 0x3b, 0xdf, + 0x13, 0x5e, 0xdd, 0x7e, 0x39, 0x9a, 0xf3, 0x82, 0xab, 0xdb, 0xed, 0x75, 0x12, 0xfa, 0x24, 0x26, + 0xd1, 0xd5, 0x5d, 0xe2, 0xbb, 0x41, 0x78, 0x55, 0x00, 0x9c, 0x96, 0x77, 0xb5, 0x11, 0x84, 0xe4, + 0xea, 0xee, 0xb3, 0x57, 0x37, 0x89, 0x4f, 0x42, 0x27, 0x26, 0xee, 0x5c, 0x2b, 0x0c, 0xe2, 0x00, + 0x21, 0x8e, 0x33, 0xe7, 0xb4, 0xbc, 0x39, 0x8a, 0x33, 0xb7, 0xfb, 0xec, 0xcc, 0x33, 0x9b, 0x5e, + 0xbc, 0xd5, 0x5e, 0x9f, 0x6b, 0x04, 0x3b, 0x57, 0x37, 0x83, 0xcd, 0xe0, 0x2a, 0x43, 0x5d, 0x6f, + 0x6f, 0xb0, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xbc, 0x90, 0x34, 0xb3, 0xe3, 0x34, 0xb6, + 0x3c, 0x9f, 0x84, 0xfb, 0x57, 0x5b, 0xdb, 0x9b, 0xac, 0xdd, 0x90, 0x44, 0x41, 0x3b, 0x6c, 0x90, + 0x74, 0xc3, 0x5d, 0x6b, 0x45, 0x57, 0x77, 0x48, 0xec, 0x64, 0x74, 0x77, 0xe6, 0x6a, 0x5e, 0xad, + 0xb0, 0xed, 0xc7, 0xde, 0x4e, 0x67, 0x33, 0x9f, 0xec, 0x55, 0x21, 0x6a, 0x6c, 0x91, 0x1d, 0xa7, + 0xa3, 0xde, 0xf3, 0x79, 0xf5, 0xda, 0xb1, 0xd7, 0xbc, 0xea, 0xf9, 0x71, 0x14, 0x87, 0xe9, 0x4a, + 0xf6, 0x57, 0x2d, 0xb8, 0x34, 0x7f, 0xb7, 0xbe, 0xd4, 0x74, 0xa2, 0xd8, 0x6b, 0x2c, 0x34, 0x83, + 0xc6, 0x76, 0x3d, 0x0e, 0x42, 0x72, 0x27, 0x68, 0xb6, 0x77, 0x48, 0x9d, 0x0d, 0x04, 0x7a, 0x1a, + 0x4a, 0xbb, 0xec, 0x7f, 0xb5, 0x32, 0x6d, 0x5d, 0xb2, 0xae, 0x94, 0x17, 0x26, 0x7f, 0xe3, 0x60, + 0xf6, 0x23, 0xf7, 0x0f, 0x66, 0x4b, 0x77, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x32, 0x0c, 0x6d, 0x44, + 0x6b, 0xfb, 0x2d, 0x32, 0x5d, 0x60, 0xb8, 0xe3, 0x02, 0x77, 0x68, 0xb9, 0x4e, 0x4b, 0xb1, 0x80, + 0xa2, 0xab, 0x50, 0x6e, 0x39, 0x61, 0xec, 0xc5, 0x5e, 0xe0, 0x4f, 0x17, 0x2f, 0x59, 0x57, 0x06, + 0x17, 0xa6, 0x04, 0x6a, 0xb9, 0x26, 0x01, 0x38, 0xc1, 0xa1, 0xdd, 0x08, 0x89, 0xe3, 0xde, 0xf2, + 0x9b, 0xfb, 0xd3, 0x03, 0x97, 0xac, 0x2b, 0xa5, 0xa4, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0xff, + 0x70, 0x01, 0x4a, 0xf3, 0x1b, 0x1b, 0x9e, 0xef, 0xc5, 0xfb, 0xe8, 0x0e, 0x8c, 0xfa, 0x81, 0x4b, + 0xe4, 0x7f, 0xf6, 0x15, 0x23, 0xcf, 0x5d, 0x9a, 0xeb, 0x5c, 0x4a, 0x73, 0xab, 0x1a, 0xde, 0xc2, + 0xe4, 0xfd, 0x83, 0xd9, 0x51, 0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0x91, 0x56, 0xe0, 0x2a, 0xb2, + 0x05, 0x46, 0x76, 0x36, 0x8b, 0x6c, 0x2d, 0x41, 0x5b, 0x98, 0xb8, 0x7f, 0x30, 0x3b, 0xa2, 0x15, + 0x60, 0x9d, 0x08, 0x5a, 0x87, 0x09, 0xfa, 0xd7, 0x8f, 0x3d, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x9e, + 0x47, 0x57, 0x43, 0x5d, 0x38, 0x75, 0xff, 0x60, 0x76, 0x22, 0x55, 0x88, 0xd3, 0x04, 0xed, 0xf7, + 0x60, 0x7c, 0x3e, 0x8e, 0x9d, 0xc6, 0x16, 0x71, 0xf9, 0x0c, 0xa2, 0x17, 0x60, 0xc0, 0x77, 0x76, + 0x88, 0x98, 0xdf, 0x4b, 0x62, 0x60, 0x07, 0x56, 0x9d, 0x1d, 0x72, 0x78, 0x30, 0x3b, 0x79, 0xdb, + 0xf7, 0xde, 0x6d, 0x8b, 0x55, 0x41, 0xcb, 0x30, 0xc3, 0x46, 0xcf, 0x01, 0xb8, 0x64, 0xd7, 0x6b, + 0x90, 0x9a, 0x13, 0x6f, 0x89, 0xf9, 0x46, 0xa2, 0x2e, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0x3d, + 0x28, 0xcf, 0xef, 0x06, 0x9e, 0x5b, 0x0b, 0xdc, 0x08, 0x6d, 0xc3, 0x44, 0x2b, 0x24, 0x1b, 0x24, + 0x54, 0x45, 0xd3, 0xd6, 0xa5, 0xe2, 0x95, 0x91, 0xe7, 0xae, 0x64, 0x7e, 0xac, 0x89, 0xba, 0xe4, + 0xc7, 0xe1, 0xfe, 0xc2, 0x23, 0xa2, 0xbd, 0x89, 0x14, 0x14, 0xa7, 0x29, 0xdb, 0xff, 0xbc, 0x00, + 0x67, 0xe6, 0xdf, 0x6b, 0x87, 0xa4, 0xe2, 0x45, 0xdb, 0xe9, 0x15, 0xee, 0x7a, 0xd1, 
0xf6, 0x6a, + 0x32, 0x02, 0x6a, 0x69, 0x55, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x19, 0x18, 0xa6, 0xbf, 0x6f, 0xe3, + 0xaa, 0xf8, 0xe4, 0x53, 0x02, 0x79, 0xa4, 0xe2, 0xc4, 0x4e, 0x85, 0x83, 0xb0, 0xc4, 0x41, 0x2b, + 0x30, 0xd2, 0x60, 0x1b, 0x72, 0x73, 0x25, 0x70, 0x09, 0x9b, 0xcc, 0xf2, 0xc2, 0x53, 0x14, 0x7d, + 0x31, 0x29, 0x3e, 0x3c, 0x98, 0x9d, 0xe6, 0x7d, 0x13, 0x24, 0x34, 0x18, 0xd6, 0xeb, 0x23, 0x5b, + 0xed, 0xaf, 0x01, 0x46, 0x09, 0x32, 0xf6, 0xd6, 0x15, 0x6d, 0xab, 0x0c, 0xb2, 0xad, 0x32, 0x9a, + 0xbd, 0x4d, 0xd0, 0xb3, 0x30, 0xb0, 0xed, 0xf9, 0xee, 0xf4, 0x10, 0xa3, 0x75, 0x81, 0xce, 0xf9, + 0x0d, 0xcf, 0x77, 0x0f, 0x0f, 0x66, 0xa7, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0x3f, 0xb1, + 0x60, 0x96, 0xc1, 0x96, 0xbd, 0x26, 0xa9, 0x91, 0x30, 0xf2, 0xa2, 0x98, 0xf8, 0xb1, 0x31, 0xa0, + 0xcf, 0x01, 0x44, 0xa4, 0x11, 0x92, 0x58, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c, + 0x7a, 0x20, 0x44, 0x5b, 0x4e, 0xc8, 0xd6, 0x97, 0x18, 0x58, 0x75, 0x20, 0xd4, 0x25, 0x00, 0x27, + 0x38, 0xc6, 0x81, 0x50, 0xec, 0x75, 0x20, 0xa0, 0x4f, 0xc1, 0x44, 0xd2, 0x58, 0xd4, 0x72, 0x1a, + 0x72, 0x00, 0xd9, 0x96, 0xa9, 0x9b, 0x20, 0x9c, 0xc6, 0xb5, 0xff, 0x8e, 0x25, 0x16, 0x0f, 0xfd, + 0xea, 0x0f, 0xf8, 0xb7, 0xda, 0xbf, 0x6c, 0xc1, 0xf0, 0x82, 0xe7, 0xbb, 0x9e, 0xbf, 0x89, 0x3e, + 0x0f, 0x25, 0x7a, 0x37, 0xb9, 0x4e, 0xec, 0x88, 0x73, 0xef, 0x13, 0xda, 0xde, 0x52, 0x57, 0xc5, + 0x5c, 0x6b, 0x7b, 0x93, 0x16, 0x44, 0x73, 0x14, 0x9b, 0xee, 0xb6, 0x5b, 0xeb, 0xef, 0x90, 0x46, + 0xbc, 0x42, 0x62, 0x27, 0xf9, 0x9c, 0xa4, 0x0c, 0x2b, 0xaa, 0xe8, 0x06, 0x0c, 0xc5, 0x4e, 0xb8, + 0x49, 0x62, 0x71, 0x00, 0x66, 0x1e, 0x54, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0xf8, 0x0d, 0x92, 0x5c, + 0x0b, 0x6b, 0xac, 0x2a, 0x16, 0x24, 0xec, 0x1f, 0x1c, 0x86, 0x73, 0x8b, 0xf5, 0x6a, 0xce, 0xba, + 0xba, 0x0c, 0x43, 0x6e, 0xe8, 0xed, 0x92, 0x50, 0x8c, 0xb3, 0xa2, 0x52, 0x61, 0xa5, 0x58, 0x40, + 0xd1, 0xcb, 0x30, 0xca, 0x2f, 0xa4, 0xeb, 0x8e, 0xef, 0x36, 0xe5, 0x10, 0x9f, 0x16, 0xd8, 0xa3, + 0x77, 0x34, 0x18, 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0xa7, 0x36, 0x63, 0xde, 0x65, 0xf7, 0x45, + 0x0b, 0x26, 0x79, 0x33, 0xf3, 0x71, 0x1c, 0x7a, 0xeb, 0xed, 0x98, 0x44, 0xd3, 0x83, 0xec, 0xa4, + 0x5b, 0xcc, 0x1a, 0xad, 0xdc, 0x11, 0x98, 0xbb, 0x93, 0xa2, 0xc2, 0x0f, 0xc1, 0x69, 0xd1, 0xee, + 0x64, 0x1a, 0x8c, 0x3b, 0x9a, 0x45, 0xdf, 0x69, 0xc1, 0x4c, 0x23, 0xf0, 0xe3, 0x30, 0x68, 0x36, + 0x49, 0x58, 0x6b, 0xaf, 0x37, 0xbd, 0x68, 0x8b, 0xaf, 0x53, 0x4c, 0x36, 0xd8, 0x49, 0x90, 0x33, + 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0xc5, 0xfb, 0x07, 0xb3, 0x33, 0x8b, 0xb9, 0xa4, 0x70, 0x97, 0x66, + 0xd0, 0x36, 0x20, 0x7a, 0x95, 0xd6, 0x63, 0x67, 0x93, 0x24, 0x8d, 0x0f, 0xf7, 0xdf, 0xf8, 0xd9, + 0xfb, 0x07, 0xb3, 0x68, 0xb5, 0x83, 0x04, 0xce, 0x20, 0x8b, 0xde, 0x85, 0xd3, 0xb4, 0xb4, 0xe3, + 0x5b, 0x4b, 0xfd, 0x37, 0x37, 0x7d, 0xff, 0x60, 0xf6, 0xf4, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, + 0x7d, 0x87, 0x05, 0xe7, 0x92, 0xcf, 0x5f, 0xba, 0xd7, 0x72, 0x7c, 0x37, 0x69, 0xb8, 0xdc, 0x7f, + 0xc3, 0xf4, 0x4c, 0x3e, 0xb7, 0x98, 0x47, 0x09, 0xe7, 0x37, 0x32, 0xb3, 0x08, 0x67, 0x32, 0x57, + 0x0b, 0x9a, 0x84, 0xe2, 0x36, 0xe1, 0x5c, 0x50, 0x19, 0xd3, 0x9f, 0xe8, 0x34, 0x0c, 0xee, 0x3a, + 0xcd, 0xb6, 0xd8, 0x28, 0x98, 0xff, 0x79, 0xa5, 0xf0, 0xb2, 0x65, 0xff, 0x8b, 0x22, 0x4c, 0x2c, + 0xd6, 0xab, 0x0f, 0xb4, 0x0b, 0xf5, 0x6b, 0xa8, 0xd0, 0xf5, 0x1a, 0x4a, 0x2e, 0xb5, 0x62, 0xee, + 0xa5, 0xf6, 0xff, 0x64, 0x6c, 0xa1, 0x01, 0xb6, 0x85, 0xbe, 0x29, 0x67, 0x0b, 0x1d, 0xf3, 0xc6, + 0xd9, 0xcd, 0x59, 0x45, 0x83, 0x6c, 0x32, 0x33, 0x39, 0x96, 0x9b, 0x41, 0xc3, 0x69, 0xa6, 0x8f, + 0xbe, 0x23, 
0x2e, 0xa5, 0xe3, 0x99, 0xc7, 0x06, 0x8c, 0x2e, 0x3a, 0x2d, 0x67, 0xdd, 0x6b, 0x7a, + 0xb1, 0x47, 0x22, 0xf4, 0x04, 0x14, 0x1d, 0xd7, 0x65, 0xdc, 0x56, 0x79, 0xe1, 0xcc, 0xfd, 0x83, + 0xd9, 0xe2, 0xbc, 0x4b, 0xaf, 0x7d, 0x50, 0x58, 0xfb, 0x98, 0x62, 0xa0, 0x8f, 0xc3, 0x80, 0x1b, + 0x06, 0xad, 0xe9, 0x02, 0xc3, 0xa4, 0xbb, 0x6e, 0xa0, 0x12, 0x06, 0xad, 0x14, 0x2a, 0xc3, 0xb1, + 0x7f, 0xb5, 0x00, 0xe7, 0x17, 0x49, 0x6b, 0x6b, 0xb9, 0x9e, 0x73, 0x7e, 0x5f, 0x81, 0xd2, 0x4e, + 0xe0, 0x7b, 0x71, 0x10, 0x46, 0xa2, 0x69, 0xb6, 0x22, 0x56, 0x44, 0x19, 0x56, 0x50, 0x74, 0x09, + 0x06, 0x5a, 0x09, 0x53, 0x39, 0x2a, 0x19, 0x52, 0xc6, 0x4e, 0x32, 0x08, 0xc5, 0x68, 0x47, 0x24, + 0x14, 0x2b, 0x46, 0x61, 0xdc, 0x8e, 0x48, 0x88, 0x19, 0x24, 0xb9, 0x99, 0xe9, 0x9d, 0x2d, 0x4e, + 0xe8, 0xd4, 0xcd, 0x4c, 0x21, 0x58, 0xc3, 0x42, 0x35, 0x28, 0x47, 0xa9, 0x99, 0xed, 0x6b, 0x9b, + 0x8e, 0xb1, 0xab, 0x5b, 0xcd, 0x64, 0x42, 0xc4, 0xb8, 0x51, 0x86, 0x7a, 0x5e, 0xdd, 0x5f, 0x29, + 0x00, 0xe2, 0x43, 0xf8, 0x17, 0x6c, 0xe0, 0x6e, 0x77, 0x0e, 0x5c, 0xff, 0x5b, 0xe2, 0xb8, 0x46, + 0xef, 0x4f, 0x2d, 0x38, 0xbf, 0xe8, 0xf9, 0x2e, 0x09, 0x73, 0x16, 0xe0, 0xc3, 0x79, 0xcb, 0x1e, + 0x8d, 0x69, 0x30, 0x96, 0xd8, 0xc0, 0x31, 0x2c, 0x31, 0xfb, 0x8f, 0x2c, 0x40, 0xfc, 0xb3, 0x3f, + 0x70, 0x1f, 0x7b, 0xbb, 0xf3, 0x63, 0x8f, 0x61, 0x59, 0xd8, 0x37, 0x61, 0x7c, 0xb1, 0xe9, 0x11, + 0x3f, 0xae, 0xd6, 0x16, 0x03, 0x7f, 0xc3, 0xdb, 0x44, 0xaf, 0xc0, 0x78, 0xec, 0xed, 0x90, 0xa0, + 0x1d, 0xd7, 0x49, 0x23, 0xf0, 0xd9, 0x4b, 0xd2, 0xba, 0x32, 0xb8, 0x80, 0xee, 0x1f, 0xcc, 0x8e, + 0xaf, 0x19, 0x10, 0x9c, 0xc2, 0xb4, 0x7f, 0x87, 0x8e, 0x5f, 0xb0, 0xd3, 0x0a, 0x7c, 0xe2, 0xc7, + 0x8b, 0x81, 0xef, 0x72, 0x89, 0xc3, 0x2b, 0x30, 0x10, 0xd3, 0xf1, 0xe0, 0x63, 0x77, 0x59, 0x6e, + 0x14, 0x3a, 0x0a, 0x87, 0x07, 0xb3, 0x67, 0x3b, 0x6b, 0xb0, 0x71, 0x62, 0x75, 0xd0, 0x37, 0xc1, + 0x50, 0x14, 0x3b, 0x71, 0x3b, 0x12, 0xa3, 0xf9, 0x98, 0x1c, 0xcd, 0x3a, 0x2b, 0x3d, 0x3c, 0x98, + 0x9d, 0x50, 0xd5, 0x78, 0x11, 0x16, 0x15, 0xd0, 0x93, 0x30, 0xbc, 0x43, 0xa2, 0xc8, 0xd9, 0x94, + 0xb7, 0xe1, 0x84, 0xa8, 0x3b, 0xbc, 0xc2, 0x8b, 0xb1, 0x84, 0xa3, 0xc7, 0x61, 0x90, 0x84, 0x61, + 0x10, 0x8a, 0x3d, 0x3a, 0x26, 0x10, 0x07, 0x97, 0x68, 0x21, 0xe6, 0x30, 0xfb, 0xdf, 0x58, 0x30, + 0xa1, 0xfa, 0xca, 0xdb, 0x3a, 0x81, 0x57, 0xc1, 0x5b, 0x00, 0x0d, 0xf9, 0x81, 0x11, 0xbb, 0x3d, + 0x46, 0x9e, 0xbb, 0x9c, 0x79, 0x51, 0x77, 0x0c, 0x63, 0x42, 0x59, 0x15, 0x45, 0x58, 0xa3, 0x66, + 0xff, 0x63, 0x0b, 0x4e, 0xa5, 0xbe, 0xe8, 0xa6, 0x17, 0xc5, 0xe8, 0xed, 0x8e, 0xaf, 0x9a, 0xeb, + 0xef, 0xab, 0x68, 0x6d, 0xf6, 0x4d, 0x6a, 0x29, 0xcb, 0x12, 0xed, 0x8b, 0xae, 0xc3, 0xa0, 0x17, + 0x93, 0x1d, 0xf9, 0x31, 0x8f, 0x77, 0xfd, 0x18, 0xde, 0xab, 0x64, 0x46, 0xaa, 0xb4, 0x26, 0xe6, + 0x04, 0xec, 0xbf, 0x5c, 0x84, 0x32, 0x5f, 0xb6, 0x2b, 0x4e, 0xeb, 0x04, 0xe6, 0xa2, 0x0a, 0x03, + 0x8c, 0x3a, 0xef, 0xf8, 0x13, 0xd9, 0x1d, 0x17, 0xdd, 0x99, 0xa3, 0x4f, 0x7e, 0xce, 0x1c, 0xa9, + 0xab, 0x81, 0x16, 0x61, 0x46, 0x02, 0x39, 0x00, 0xeb, 0x9e, 0xef, 0x84, 0xfb, 0xb4, 0x6c, 0xba, + 0xc8, 0x08, 0x3e, 0xd3, 0x9d, 0xe0, 0x82, 0xc2, 0xe7, 0x64, 0x55, 0x5f, 0x13, 0x00, 0xd6, 0x88, + 0xce, 0xbc, 0x04, 0x65, 0x85, 0x7c, 0x14, 0x1e, 0x67, 0xe6, 0x53, 0x30, 0x91, 0x6a, 0xab, 0x57, + 0xf5, 0x51, 0x9d, 0x45, 0xfa, 0x32, 0x3b, 0x05, 0x44, 0xaf, 0x97, 0xfc, 0x5d, 0x71, 0x8a, 0xbe, + 0x07, 0xa7, 0x9b, 0x19, 0x87, 0x93, 0x98, 0xaa, 0xfe, 0x0f, 0xb3, 0xf3, 0xe2, 0xb3, 0x4f, 0x67, + 0x41, 0x71, 0x66, 0x1b, 0xf4, 0xda, 0x0f, 0x5a, 0x74, 0xcd, 0x3b, 0x4d, 0x9d, 0x83, 0xbe, 0x25, + 0xca, 0xb0, 0x82, 0xd2, 0x23, 0xec, 
0xb4, 0xea, 0xfc, 0x0d, 0xb2, 0x5f, 0x27, 0x4d, 0xd2, 0x88, + 0x83, 0xf0, 0xeb, 0xda, 0xfd, 0x0b, 0x7c, 0xf4, 0xf9, 0x09, 0x38, 0x22, 0x08, 0x14, 0x6f, 0x90, + 0x7d, 0x3e, 0x15, 0xfa, 0xd7, 0x15, 0xbb, 0x7e, 0xdd, 0xcf, 0x59, 0x30, 0xa6, 0xbe, 0xee, 0x04, + 0xb6, 0xfa, 0x82, 0xb9, 0xd5, 0x2f, 0x74, 0x5d, 0xe0, 0x39, 0x9b, 0xfc, 0x2b, 0x05, 0x38, 0xa7, + 0x70, 0x28, 0xbb, 0xcf, 0xff, 0x88, 0x55, 0x75, 0x15, 0xca, 0xbe, 0x12, 0x44, 0x59, 0xa6, 0x04, + 0x28, 0x11, 0x43, 0x25, 0x38, 0x94, 0x6b, 0xf3, 0x13, 0x69, 0xd1, 0xa8, 0x2e, 0xa1, 0x15, 0xd2, + 0xd8, 0x05, 0x28, 0xb6, 0x3d, 0x57, 0xdc, 0x19, 0x9f, 0x90, 0xa3, 0x7d, 0xbb, 0x5a, 0x39, 0x3c, + 0x98, 0x7d, 0x2c, 0x4f, 0x3b, 0x40, 0x2f, 0xab, 0x68, 0xee, 0x76, 0xb5, 0x82, 0x69, 0x65, 0x34, + 0x0f, 0x13, 0x52, 0x01, 0x72, 0x87, 0x72, 0x50, 0x81, 0x2f, 0xae, 0x16, 0x25, 0x66, 0xc5, 0x26, + 0x18, 0xa7, 0xf1, 0x51, 0x05, 0x26, 0xb7, 0xdb, 0xeb, 0xa4, 0x49, 0x62, 0xfe, 0xc1, 0x37, 0x08, + 0x17, 0x42, 0x96, 0x93, 0xc7, 0xd6, 0x8d, 0x14, 0x1c, 0x77, 0xd4, 0xb0, 0xff, 0x9c, 0x1d, 0xf1, + 0x62, 0xf4, 0x6a, 0x61, 0x40, 0x17, 0x16, 0xa5, 0xfe, 0xf5, 0x5c, 0xce, 0xfd, 0xac, 0x8a, 0x1b, + 0x64, 0x7f, 0x2d, 0xa0, 0xcc, 0x76, 0xf6, 0xaa, 0x30, 0xd6, 0xfc, 0x40, 0xd7, 0x35, 0xff, 0x0b, + 0x05, 0x38, 0xa3, 0x46, 0xc0, 0xe0, 0xeb, 0xfe, 0xa2, 0x8f, 0xc1, 0xb3, 0x30, 0xe2, 0x92, 0x0d, + 0xa7, 0xdd, 0x8c, 0x95, 0x44, 0x7c, 0x90, 0x6b, 0x45, 0x2a, 0x49, 0x31, 0xd6, 0x71, 0x8e, 0x30, + 0x6c, 0xff, 0x63, 0x84, 0xdd, 0xad, 0xb1, 0x43, 0xd7, 0xb8, 0xda, 0x35, 0x56, 0xee, 0xae, 0x79, + 0x1c, 0x06, 0xbd, 0x1d, 0xca, 0x6b, 0x15, 0x4c, 0x16, 0xaa, 0x4a, 0x0b, 0x31, 0x87, 0xa1, 0x8f, + 0xc1, 0x70, 0x23, 0xd8, 0xd9, 0x71, 0x7c, 0x97, 0x5d, 0x79, 0xe5, 0x85, 0x11, 0xca, 0x8e, 0x2d, + 0xf2, 0x22, 0x2c, 0x61, 0xe8, 0x3c, 0x0c, 0x38, 0xe1, 0x26, 0x17, 0x4b, 0x94, 0x17, 0x4a, 0xb4, + 0xa5, 0xf9, 0x70, 0x33, 0xc2, 0xac, 0x94, 0xbe, 0xaa, 0xf6, 0x82, 0x70, 0xdb, 0xf3, 0x37, 0x2b, + 0x5e, 0x28, 0xb6, 0x84, 0xba, 0x0b, 0xef, 0x2a, 0x08, 0xd6, 0xb0, 0xd0, 0x32, 0x0c, 0xb6, 0x82, + 0x30, 0x8e, 0xa6, 0x87, 0xd8, 0x70, 0x3f, 0x96, 0x73, 0x10, 0xf1, 0xaf, 0xad, 0x05, 0x61, 0x9c, + 0x7c, 0x00, 0xfd, 0x17, 0x61, 0x5e, 0x1d, 0xdd, 0x84, 0x61, 0xe2, 0xef, 0x2e, 0x87, 0xc1, 0xce, + 0xf4, 0xa9, 0x7c, 0x4a, 0x4b, 0x1c, 0x85, 0x2f, 0xb3, 0x84, 0xed, 0x14, 0xc5, 0x58, 0x92, 0x40, + 0xdf, 0x04, 0x45, 0xe2, 0xef, 0x4e, 0x0f, 0x33, 0x4a, 0x33, 0x39, 0x94, 0xee, 0x38, 0x61, 0x72, + 0xe6, 0x2f, 0xf9, 0xbb, 0x98, 0xd6, 0x41, 0x9f, 0x81, 0xb2, 0x3c, 0x30, 0x22, 0x21, 0x7f, 0xcb, + 0x5c, 0xb0, 0xf2, 0x98, 0xc1, 0xe4, 0xdd, 0xb6, 0x17, 0x92, 0x1d, 0xe2, 0xc7, 0x51, 0x72, 0x42, + 0x4a, 0x68, 0x84, 0x13, 0x6a, 0xe8, 0x33, 0x52, 0xe8, 0xbb, 0x12, 0xb4, 0xfd, 0x38, 0x9a, 0x2e, + 0xb3, 0xee, 0x65, 0xaa, 0xe3, 0xee, 0x24, 0x78, 0x69, 0xa9, 0x30, 0xaf, 0x8c, 0x0d, 0x52, 0xe8, + 0xb3, 0x30, 0xc6, 0xff, 0x73, 0xa5, 0x56, 0x34, 0x7d, 0x86, 0xd1, 0xbe, 0x94, 0x4f, 0x9b, 0x23, + 0x2e, 0x9c, 0x11, 0xc4, 0xc7, 0xf4, 0xd2, 0x08, 0x9b, 0xd4, 0x10, 0x86, 0xb1, 0xa6, 0xb7, 0x4b, + 0x7c, 0x12, 0x45, 0xb5, 0x30, 0x58, 0x27, 0xd3, 0xc0, 0x06, 0xe6, 0x5c, 0xb6, 0x12, 0x2c, 0x58, + 0x27, 0x0b, 0x53, 0x94, 0xe6, 0x4d, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0xdb, 0x30, 0x4e, 0x1f, 0x61, + 0x5e, 0x42, 0x74, 0xa4, 0x17, 0x51, 0xf6, 0x54, 0xc2, 0x46, 0x25, 0x9c, 0x22, 0x82, 0x6e, 0xc1, + 0x68, 0x14, 0x3b, 0x61, 0xdc, 0x6e, 0x71, 0xa2, 0x67, 0x7b, 0x11, 0x65, 0x3a, 0xd4, 0xba, 0x56, + 0x05, 0x1b, 0x04, 0xd0, 0x1b, 0x50, 0x6e, 0x7a, 0x1b, 0xa4, 0xb1, 0xdf, 0x68, 0x92, 0xe9, 0x51, + 0x46, 0x2d, 0xf3, 0x50, 0xb9, 0x29, 0x91, 0xf8, 0xab, 0x50, 
0xfd, 0xc5, 0x49, 0x75, 0x74, 0x07, + 0xce, 0xc6, 0x24, 0xdc, 0xf1, 0x7c, 0x87, 0x1e, 0x06, 0xe2, 0xb5, 0xc4, 0x74, 0x93, 0x63, 0x6c, + 0xb7, 0x5d, 0x14, 0xb3, 0x71, 0x76, 0x2d, 0x13, 0x0b, 0xe7, 0xd4, 0x46, 0xf7, 0x60, 0x3a, 0x03, + 0x12, 0x34, 0xbd, 0xc6, 0xfe, 0xf4, 0x69, 0x46, 0xf9, 0x35, 0x41, 0x79, 0x7a, 0x2d, 0x07, 0xef, + 0xb0, 0x0b, 0x0c, 0xe7, 0x52, 0x47, 0xb7, 0x60, 0x82, 0x9d, 0x40, 0xb5, 0x76, 0xb3, 0x29, 0x1a, + 0x1c, 0x67, 0x0d, 0x7e, 0x4c, 0xde, 0xc7, 0x55, 0x13, 0x7c, 0x78, 0x30, 0x0b, 0xc9, 0x3f, 0x9c, + 0xae, 0x8d, 0xd6, 0x99, 0x1a, 0xac, 0x1d, 0x7a, 0xf1, 0x3e, 0x3d, 0x37, 0xc8, 0xbd, 0x78, 0x7a, + 0xa2, 0xab, 0x08, 0x42, 0x47, 0x55, 0xba, 0x32, 0xbd, 0x10, 0xa7, 0x09, 0xd2, 0x23, 0x35, 0x8a, + 0x5d, 0xcf, 0x9f, 0x9e, 0x64, 0x27, 0xb5, 0x3a, 0x91, 0xea, 0xb4, 0x10, 0x73, 0x18, 0x53, 0x81, + 0xd1, 0x1f, 0xb7, 0xe8, 0xcd, 0x35, 0xc5, 0x10, 0x13, 0x15, 0x98, 0x04, 0xe0, 0x04, 0x87, 0x32, + 0x93, 0x71, 0xbc, 0x3f, 0x8d, 0x18, 0xaa, 0x3a, 0x58, 0xd6, 0xd6, 0x3e, 0x83, 0x69, 0xb9, 0xbd, + 0x0e, 0xe3, 0xea, 0x20, 0x64, 0x63, 0x82, 0x66, 0x61, 0x90, 0xb1, 0x4f, 0x42, 0x60, 0x56, 0xa6, + 0x5d, 0x60, 0xac, 0x15, 0xe6, 0xe5, 0xac, 0x0b, 0xde, 0x7b, 0x64, 0x61, 0x3f, 0x26, 0xfc, 0x99, + 0x5e, 0xd4, 0xba, 0x20, 0x01, 0x38, 0xc1, 0xb1, 0xff, 0x37, 0x67, 0x43, 0x93, 0xd3, 0xb6, 0x8f, + 0xfb, 0xe5, 0x69, 0x28, 0x6d, 0x05, 0x51, 0x4c, 0xb1, 0x59, 0x1b, 0x83, 0x09, 0xe3, 0x79, 0x5d, + 0x94, 0x63, 0x85, 0x81, 0x5e, 0x85, 0xb1, 0x86, 0xde, 0x80, 0xb8, 0x1c, 0xd5, 0x31, 0x62, 0xb4, + 0x8e, 0x4d, 0x5c, 0xf4, 0x32, 0x94, 0x98, 0x59, 0x47, 0x23, 0x68, 0x0a, 0xae, 0x4d, 0xde, 0xf0, + 0xa5, 0x9a, 0x28, 0x3f, 0xd4, 0x7e, 0x63, 0x85, 0x8d, 0x2e, 0xc3, 0x10, 0xed, 0x42, 0xb5, 0x26, + 0xae, 0x25, 0x25, 0xfb, 0xb9, 0xce, 0x4a, 0xb1, 0x80, 0xda, 0x7f, 0xa9, 0xa0, 0x8d, 0x32, 0x7d, + 0xe2, 0x12, 0x54, 0x83, 0xe1, 0x3d, 0xc7, 0x8b, 0x3d, 0x7f, 0x53, 0xf0, 0x1f, 0x4f, 0x76, 0xbd, + 0xa3, 0x58, 0xa5, 0xbb, 0xbc, 0x02, 0xbf, 0x45, 0xc5, 0x1f, 0x2c, 0xc9, 0x50, 0x8a, 0x61, 0xdb, + 0xf7, 0x29, 0xc5, 0x42, 0xbf, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, 0x3f, 0x58, 0x92, 0x41, 0x6f, + 0x03, 0xc8, 0x1d, 0x46, 0x5c, 0x61, 0x4e, 0xf1, 0x74, 0x6f, 0xa2, 0x6b, 0xaa, 0xce, 0xc2, 0x38, + 0xbd, 0xa3, 0x93, 0xff, 0x58, 0xa3, 0x67, 0xc7, 0x8c, 0x4f, 0xeb, 0xec, 0x0c, 0xfa, 0x56, 0xba, + 0xc4, 0x9d, 0x30, 0x26, 0xee, 0x7c, 0x2c, 0x06, 0xe7, 0xe3, 0xfd, 0x3d, 0x52, 0xd6, 0xbc, 0x1d, + 0xa2, 0x6f, 0x07, 0x41, 0x04, 0x27, 0xf4, 0xec, 0x5f, 0x2a, 0xc2, 0x74, 0x5e, 0x77, 0xe9, 0xa2, + 0x23, 0xf7, 0xbc, 0x78, 0x91, 0xb2, 0x57, 0x96, 0xb9, 0xe8, 0x96, 0x44, 0x39, 0x56, 0x18, 0x74, + 0xf6, 0x23, 0x6f, 0x53, 0xbe, 0x31, 0x07, 0x93, 0xd9, 0xaf, 0xb3, 0x52, 0x2c, 0xa0, 0x14, 0x2f, + 0x24, 0x4e, 0x24, 0xec, 0x75, 0xb4, 0x55, 0x82, 0x59, 0x29, 0x16, 0x50, 0x5d, 0x80, 0x35, 0xd0, + 0x43, 0x80, 0x65, 0x0c, 0xd1, 0xe0, 0xf1, 0x0e, 0x11, 0xfa, 0x1c, 0xc0, 0x86, 0xe7, 0x7b, 0xd1, + 0x16, 0xa3, 0x3e, 0x74, 0x64, 0xea, 0x8a, 0x39, 0x5b, 0x56, 0x54, 0xb0, 0x46, 0x11, 0xbd, 0x08, + 0x23, 0x6a, 0x03, 0x56, 0x2b, 0x4c, 0x79, 0xa9, 0x19, 0x83, 0x24, 0xa7, 0x51, 0x05, 0xeb, 0x78, + 0xf6, 0x3b, 0xe9, 0xf5, 0x22, 0x76, 0x80, 0x36, 0xbe, 0x56, 0xbf, 0xe3, 0x5b, 0xe8, 0x3e, 0xbe, + 0xf6, 0xd7, 0x8a, 0x30, 0x61, 0x34, 0xd6, 0x8e, 0xfa, 0x38, 0xb3, 0xae, 0xd1, 0x03, 0xdc, 0x89, + 0x89, 0xd8, 0x7f, 0x76, 0xef, 0xad, 0xa2, 0x1f, 0xf2, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x73, 0x50, + 0x6e, 0x3a, 0x11, 0x13, 0x86, 0x11, 0xb1, 0xef, 0xfa, 0x21, 0x96, 0x3c, 0x4c, 0x9c, 0x28, 0xd6, + 0x6e, 0x4d, 0x4e, 0x3b, 0x21, 0x49, 0x6f, 0x1a, 0xca, 0x9f, 0x48, 0x83, 0x30, 0xd5, 
0x09, 0xca, + 0xc4, 0xec, 0x63, 0x0e, 0x43, 0x2f, 0xc3, 0x68, 0x48, 0xd8, 0xaa, 0x58, 0xa4, 0xdc, 0x1c, 0x5b, + 0x66, 0x83, 0x09, 0xdb, 0x87, 0x35, 0x18, 0x36, 0x30, 0x93, 0xb7, 0xc1, 0x50, 0x97, 0xb7, 0xc1, + 0x93, 0x30, 0xcc, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0xa3, 0xca, 0x8b, 0xb1, 0x84, 0xa7, 0x17, 0x4c, + 0xa9, 0xbf, 0x05, 0x43, 0x5f, 0x1f, 0x62, 0x51, 0x33, 0xc5, 0x71, 0x89, 0x9f, 0x72, 0x62, 0xc9, + 0x63, 0x09, 0xb3, 0x3f, 0x0e, 0xe3, 0x15, 0x87, 0xec, 0x04, 0xfe, 0x92, 0xef, 0xb6, 0x02, 0xcf, + 0x8f, 0xd1, 0x34, 0x0c, 0xb0, 0x4b, 0x84, 0x1f, 0x01, 0x03, 0xb4, 0x21, 0x3c, 0x40, 0x1f, 0x04, + 0xf6, 0x26, 0x9c, 0xa9, 0x04, 0x7b, 0xfe, 0x9e, 0x13, 0xba, 0xf3, 0xb5, 0xaa, 0xf6, 0xbe, 0x5e, + 0x95, 0xef, 0x3b, 0x6e, 0x87, 0x95, 0x79, 0xf4, 0x6a, 0x35, 0x39, 0x5b, 0xbb, 0xec, 0x35, 0x49, + 0x8e, 0x14, 0xe4, 0xaf, 0x16, 0x8c, 0x96, 0x12, 0x7c, 0xa5, 0xa8, 0xb2, 0x72, 0x15, 0x55, 0x6f, + 0x42, 0x69, 0xc3, 0x23, 0x4d, 0x17, 0x93, 0x0d, 0xb1, 0x12, 0x9f, 0xc8, 0x37, 0x2d, 0x59, 0xa6, + 0x98, 0x52, 0xea, 0xc5, 0x5f, 0x87, 0xcb, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x36, 0x4c, 0xca, 0x07, + 0x83, 0x84, 0x8a, 0x75, 0xf9, 0x64, 0xb7, 0x57, 0x88, 0x49, 0xfc, 0xf4, 0xfd, 0x83, 0xd9, 0x49, + 0x9c, 0x22, 0x83, 0x3b, 0x08, 0xd3, 0xe7, 0xe0, 0x0e, 0x3d, 0x81, 0x07, 0xd8, 0xf0, 0xb3, 0xe7, + 0x20, 0x7b, 0xd9, 0xb2, 0x52, 0xfb, 0x47, 0x2d, 0x78, 0xa4, 0x63, 0x64, 0xc4, 0x0b, 0xff, 0x98, + 0x67, 0x21, 0xfd, 0xe2, 0x2e, 0xf4, 0x7e, 0x71, 0xdb, 0x3f, 0x6b, 0xc1, 0xe9, 0xa5, 0x9d, 0x56, + 0xbc, 0x5f, 0xf1, 0x4c, 0xad, 0xd2, 0x4b, 0x30, 0xb4, 0x43, 0x5c, 0xaf, 0xbd, 0x23, 0x66, 0x6e, + 0x56, 0x9e, 0x52, 0x2b, 0xac, 0xf4, 0xf0, 0x60, 0x76, 0xac, 0x1e, 0x07, 0xa1, 0xb3, 0x49, 0x78, + 0x01, 0x16, 0xe8, 0xec, 0xac, 0xf7, 0xde, 0x23, 0x37, 0xbd, 0x1d, 0x4f, 0x9a, 0x0a, 0x75, 0x95, + 0xd9, 0xcd, 0xc9, 0x01, 0x9d, 0x7b, 0xb3, 0xed, 0xf8, 0xb1, 0x17, 0xef, 0x0b, 0x85, 0x90, 0x24, + 0x82, 0x13, 0x7a, 0xf6, 0x57, 0x2d, 0x98, 0x90, 0xeb, 0x7e, 0xde, 0x75, 0x43, 0x12, 0x45, 0x68, + 0x06, 0x0a, 0x5e, 0x4b, 0xf4, 0x12, 0x44, 0x2f, 0x0b, 0xd5, 0x1a, 0x2e, 0x78, 0x2d, 0xc9, 0x96, + 0xb1, 0x83, 0xb0, 0x68, 0xea, 0xc6, 0xae, 0x8b, 0x72, 0xac, 0x30, 0xd0, 0x15, 0x28, 0xf9, 0x81, + 0xcb, 0xcd, 0xb5, 0xf8, 0x95, 0xc6, 0x16, 0xd8, 0xaa, 0x28, 0xc3, 0x0a, 0x8a, 0x6a, 0x50, 0xe6, + 0x96, 0x4c, 0xc9, 0xa2, 0xed, 0xcb, 0x1e, 0x8a, 0x7d, 0xd9, 0x9a, 0xac, 0x89, 0x13, 0x22, 0xf6, + 0x0f, 0x58, 0x30, 0x2a, 0xbf, 0xac, 0x4f, 0x9e, 0x93, 0x6e, 0xad, 0x84, 0xdf, 0x4c, 0xb6, 0x16, + 0xe5, 0x19, 0x19, 0xc4, 0x60, 0x15, 0x8b, 0x47, 0x61, 0x15, 0xed, 0x1f, 0x29, 0xc0, 0xb8, 0xec, + 0x4e, 0xbd, 0xbd, 0x1e, 0x91, 0x18, 0xad, 0x41, 0xd9, 0xe1, 0x43, 0x4e, 0xe4, 0x8a, 0x7d, 0x3c, + 0x5b, 0x28, 0x60, 0xcc, 0x4f, 0x72, 0x7b, 0xcf, 0xcb, 0xda, 0x38, 0x21, 0x84, 0x9a, 0x30, 0xe5, + 0x07, 0x31, 0x3b, 0xc9, 0x15, 0xbc, 0x9b, 0xea, 0x25, 0x4d, 0xfd, 0x9c, 0xa0, 0x3e, 0xb5, 0x9a, + 0xa6, 0x82, 0x3b, 0x09, 0xa3, 0x25, 0x29, 0x68, 0x29, 0xe6, 0xbf, 0xec, 0xf5, 0x59, 0xc8, 0x96, + 0xb3, 0xd8, 0xbf, 0x62, 0x41, 0x59, 0xa2, 0x9d, 0x84, 0x96, 0x6d, 0x05, 0x86, 0x23, 0x36, 0x09, + 0x72, 0x68, 0xec, 0x6e, 0x1d, 0xe7, 0xf3, 0x95, 0x5c, 0x50, 0xfc, 0x7f, 0x84, 0x25, 0x0d, 0x26, + 0x67, 0x57, 0xdd, 0xff, 0x80, 0xc8, 0xd9, 0x55, 0x7f, 0x72, 0x6e, 0x98, 0x3f, 0x60, 0x7d, 0xd6, + 0x04, 0x57, 0x94, 0x8f, 0x6a, 0x85, 0x64, 0xc3, 0xbb, 0x97, 0xe6, 0xa3, 0x6a, 0xac, 0x14, 0x0b, + 0x28, 0x7a, 0x1b, 0x46, 0x1b, 0x52, 0xc0, 0x9a, 0x6c, 0xd7, 0xcb, 0x5d, 0x85, 0xfd, 0x4a, 0x2f, + 0xc4, 0x05, 0x1b, 0x8b, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xd5, 0xfc, 0xc5, 0x5e, 0x6a, 0xfe, 0x84, + 0x6e, 0xbe, 
0xd2, 0xfb, 0xc7, 0x2c, 0x18, 0xe2, 0x82, 0xb5, 0xfe, 0xe4, 0x9a, 0x9a, 0x9a, 0x2c, + 0x19, 0xbb, 0x3b, 0xb4, 0x50, 0xa8, 0xbd, 0xd0, 0x0a, 0x94, 0xd9, 0x0f, 0x26, 0x18, 0x2c, 0xe6, + 0x5b, 0xc5, 0xf3, 0x56, 0xf5, 0x0e, 0xde, 0x91, 0xd5, 0x70, 0x42, 0xc1, 0xfe, 0xa1, 0x22, 0x3d, + 0xaa, 0x12, 0x54, 0xe3, 0x06, 0xb7, 0x1e, 0xde, 0x0d, 0x5e, 0x78, 0x58, 0x37, 0xf8, 0x26, 0x4c, + 0x34, 0x34, 0xa5, 0x5a, 0x32, 0x93, 0x57, 0xba, 0x2e, 0x12, 0x4d, 0xff, 0xc6, 0x45, 0x26, 0x8b, + 0x26, 0x11, 0x9c, 0xa6, 0x8a, 0xbe, 0x15, 0x46, 0xf9, 0x3c, 0x8b, 0x56, 0xb8, 0xa5, 0xc4, 0xc7, + 0xf2, 0xd7, 0x8b, 0xde, 0x04, 0x17, 0xb1, 0x69, 0xd5, 0xb1, 0x41, 0xcc, 0xfe, 0x63, 0x0b, 0xd0, + 0x52, 0x6b, 0x8b, 0xec, 0x90, 0xd0, 0x69, 0x26, 0xb2, 0xf1, 0xff, 0xcf, 0x82, 0x69, 0xd2, 0x51, + 0xbc, 0x18, 0xec, 0xec, 0x88, 0x17, 0x48, 0xce, 0x23, 0x79, 0x29, 0xa7, 0x8e, 0x72, 0x1b, 0x98, + 0xce, 0xc3, 0xc0, 0xb9, 0xed, 0xa1, 0x15, 0x38, 0xc5, 0xaf, 0x3c, 0x05, 0xd0, 0x6c, 0xa3, 0x1f, + 0x15, 0x84, 0x4f, 0xad, 0x75, 0xa2, 0xe0, 0xac, 0x7a, 0xf6, 0x77, 0x8d, 0x42, 0x6e, 0x2f, 0x3e, + 0x54, 0x0a, 0x7c, 0xa8, 0x14, 0xf8, 0x50, 0x29, 0xf0, 0xa1, 0x52, 0xe0, 0x43, 0xa5, 0xc0, 0x37, + 0xbc, 0x52, 0xe0, 0x0f, 0x2d, 0x38, 0xd5, 0x79, 0x0d, 0x9c, 0x04, 0x63, 0xde, 0x86, 0x53, 0x9d, + 0x77, 0x5d, 0x57, 0x3b, 0xb8, 0xce, 0x7e, 0x26, 0xf7, 0x5e, 0xc6, 0x37, 0xe0, 0x2c, 0xfa, 0xf6, + 0x2f, 0x95, 0x60, 0x70, 0x69, 0x97, 0xf8, 0xf1, 0x09, 0x7c, 0x62, 0x03, 0xc6, 0x3d, 0x7f, 0x37, + 0x68, 0xee, 0x12, 0x97, 0xc3, 0x8f, 0xf2, 0xde, 0x3d, 0x2b, 0x48, 0x8f, 0x57, 0x0d, 0x12, 0x38, + 0x45, 0xf2, 0x61, 0xc8, 0x9c, 0xaf, 0xc1, 0x10, 0xbf, 0x1d, 0x84, 0xc0, 0x39, 0xf3, 0x32, 0x60, + 0x83, 0x28, 0xee, 0xbc, 0x44, 0x1e, 0xce, 0x6f, 0x1f, 0x51, 0x1d, 0xbd, 0x03, 0xe3, 0x1b, 0x5e, + 0x18, 0xc5, 0x6b, 0xde, 0x0e, 0x89, 0x62, 0x67, 0xa7, 0xf5, 0x00, 0x32, 0x66, 0x35, 0x0e, 0xcb, + 0x06, 0x25, 0x9c, 0xa2, 0x8c, 0x36, 0x61, 0xac, 0xe9, 0xe8, 0x4d, 0x0d, 0x1f, 0xb9, 0x29, 0x75, + 0xed, 0xdc, 0xd4, 0x09, 0x61, 0x93, 0x2e, 0xdd, 0xa7, 0x0d, 0x26, 0x26, 0x2d, 0x31, 0xe1, 0x81, + 0xda, 0xa7, 0x5c, 0x3e, 0xca, 0x61, 0x94, 0x83, 0x62, 0x96, 0xb1, 0x65, 0x93, 0x83, 0xd2, 0xec, + 0x5f, 0x3f, 0x0f, 0x65, 0x42, 0x87, 0x90, 0x12, 0x16, 0x37, 0xd7, 0xd5, 0xfe, 0xfa, 0xba, 0xe2, + 0x35, 0xc2, 0xc0, 0x94, 0xee, 0x2f, 0x49, 0x4a, 0x38, 0x21, 0x8a, 0x16, 0x61, 0x28, 0x22, 0xa1, + 0x47, 0x22, 0x71, 0x87, 0x75, 0x99, 0x46, 0x86, 0xc6, 0x9d, 0x4a, 0xf8, 0x6f, 0x2c, 0xaa, 0xd2, + 0xe5, 0xe5, 0x30, 0xc1, 0x27, 0xbb, 0x65, 0xb4, 0xe5, 0x35, 0xcf, 0x4a, 0xb1, 0x80, 0xa2, 0x37, + 0x60, 0x38, 0x24, 0x4d, 0xa6, 0x3e, 0x1a, 0xeb, 0x7f, 0x91, 0x73, 0x6d, 0x14, 0xaf, 0x87, 0x25, + 0x01, 0x74, 0x03, 0x50, 0x48, 0x28, 0x07, 0xe6, 0xf9, 0x9b, 0xca, 0x5e, 0x54, 0x9c, 0xe0, 0x6a, + 0xc7, 0xe3, 0x04, 0x43, 0xfa, 0xf7, 0xe0, 0x8c, 0x6a, 0xe8, 0x1a, 0x4c, 0xa9, 0xd2, 0xaa, 0x1f, + 0xc5, 0x0e, 0x3d, 0x39, 0x27, 0x18, 0x2d, 0x25, 0x00, 0xc1, 0x69, 0x04, 0xdc, 0x59, 0xc7, 0xfe, + 0x69, 0x0b, 0xf8, 0x38, 0x9f, 0xc0, 0xb3, 0xff, 0x75, 0xf3, 0xd9, 0x7f, 0x2e, 0x77, 0xe6, 0x72, + 0x9e, 0xfc, 0xf7, 0x2d, 0x18, 0xd1, 0x66, 0x36, 0x59, 0xb3, 0x56, 0x97, 0x35, 0xdb, 0x86, 0x49, + 0xba, 0xd2, 0x6f, 0xad, 0x47, 0x24, 0xdc, 0x25, 0x2e, 0x5b, 0x98, 0x85, 0x07, 0x5b, 0x98, 0xca, + 0x90, 0xed, 0x66, 0x8a, 0x20, 0xee, 0x68, 0x02, 0xbd, 0x24, 0x75, 0x29, 0x45, 0xc3, 0x0e, 0x9c, + 0xeb, 0x49, 0x0e, 0x0f, 0x66, 0x27, 0xb5, 0x0f, 0xd1, 0x75, 0x27, 0xf6, 0xe7, 0xe5, 0x37, 0x2a, + 0x83, 0xc1, 0x86, 0x5a, 0x2c, 0x29, 0x83, 0x41, 0xb5, 0x1c, 0x70, 0x82, 0x43, 0xf7, 0xe8, 0x56, + 0x10, 0xc5, 0x69, 0x83, 0xc1, 0xeb, 
0x41, 0x14, 0x63, 0x06, 0xb1, 0x9f, 0x07, 0x58, 0xba, 0x47, + 0x1a, 0x7c, 0xa9, 0xeb, 0xcf, 0x19, 0x2b, 0xff, 0x39, 0x63, 0xff, 0x3b, 0x0b, 0xc6, 0x97, 0x17, + 0x0d, 0x89, 0xf0, 0x1c, 0x00, 0x7f, 0x83, 0xdd, 0xbd, 0xbb, 0x2a, 0xb5, 0xed, 0x5c, 0x61, 0xaa, + 0x4a, 0xb1, 0x86, 0x81, 0xce, 0x41, 0xb1, 0xd9, 0xf6, 0x85, 0x74, 0x72, 0x98, 0x5e, 0xd8, 0x37, + 0xdb, 0x3e, 0xa6, 0x65, 0x9a, 0x13, 0x42, 0xb1, 0x6f, 0x27, 0x84, 0x9e, 0xc1, 0x00, 0xd0, 0x2c, + 0x0c, 0xee, 0xed, 0x79, 0x2e, 0x77, 0xb9, 0x14, 0x96, 0x00, 0x77, 0xef, 0x56, 0x2b, 0x11, 0xe6, + 0xe5, 0xf6, 0x97, 0x8a, 0x30, 0xb3, 0xdc, 0x24, 0xf7, 0xde, 0xa7, 0xdb, 0x69, 0xbf, 0x2e, 0x14, + 0x47, 0x13, 0x0d, 0x1d, 0xd5, 0x4d, 0xa6, 0xf7, 0x78, 0x6c, 0xc0, 0x30, 0xb7, 0x97, 0x93, 0x4e, + 0xa8, 0xaf, 0x66, 0xb5, 0x9e, 0x3f, 0x20, 0x73, 0xdc, 0xee, 0x4e, 0xf8, 0xd0, 0xa9, 0x9b, 0x56, + 0x94, 0x62, 0x49, 0x7c, 0xe6, 0x15, 0x18, 0xd5, 0x31, 0x8f, 0xe4, 0xb0, 0xf6, 0xff, 0x16, 0x61, + 0x92, 0xf6, 0xe0, 0xa1, 0x4e, 0xc4, 0xed, 0xce, 0x89, 0x38, 0x6e, 0xa7, 0xa5, 0xde, 0xb3, 0xf1, + 0x76, 0x7a, 0x36, 0x9e, 0xcd, 0x9b, 0x8d, 0x93, 0x9e, 0x83, 0xef, 0xb4, 0xe0, 0xd4, 0x72, 0x33, + 0x68, 0x6c, 0xa7, 0x1c, 0x8b, 0x5e, 0x84, 0x11, 0x7a, 0x8e, 0x47, 0x86, 0xcf, 0xbb, 0x11, 0x05, + 0x41, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0xbb, 0x7d, 0xbb, 0x5a, 0xc9, 0x0a, 0x9e, 0x20, 0x40, 0x58, + 0xc7, 0xb3, 0x7f, 0xd3, 0x82, 0x0b, 0xd7, 0x16, 0x97, 0x92, 0xa5, 0xd8, 0x11, 0xbf, 0xe1, 0x32, + 0x0c, 0xb5, 0x5c, 0xad, 0x2b, 0x89, 0xc0, 0xb7, 0xc2, 0x7a, 0x21, 0xa0, 0x1f, 0x94, 0xd8, 0x24, + 0x3f, 0x65, 0xc1, 0xa9, 0x6b, 0x5e, 0x4c, 0xaf, 0xe5, 0x74, 0x24, 0x01, 0x7a, 0x2f, 0x47, 0x5e, + 0x1c, 0x84, 0xfb, 0xe9, 0x48, 0x02, 0x58, 0x41, 0xb0, 0x86, 0xc5, 0x5b, 0xde, 0xf5, 0x98, 0xa5, + 0x76, 0xc1, 0xd4, 0x63, 0x61, 0x51, 0x8e, 0x15, 0x06, 0xfd, 0x30, 0xd7, 0x0b, 0x99, 0xd4, 0x70, + 0x5f, 0x9c, 0xb0, 0xea, 0xc3, 0x2a, 0x12, 0x80, 0x13, 0x1c, 0xfa, 0x80, 0x9a, 0xbd, 0xd6, 0x6c, + 0x47, 0x31, 0x09, 0x37, 0xa2, 0x9c, 0xd3, 0xf1, 0x79, 0x28, 0x13, 0x29, 0xa3, 0x17, 0xbd, 0x56, + 0xac, 0xa6, 0x12, 0xde, 0xf3, 0x80, 0x06, 0x0a, 0xaf, 0x0f, 0x37, 0xc5, 0xa3, 0xf9, 0x99, 0x2d, + 0x03, 0x22, 0x7a, 0x5b, 0x7a, 0x84, 0x07, 0xe6, 0x2a, 0xbe, 0xd4, 0x01, 0xc5, 0x19, 0x35, 0xec, + 0x1f, 0xb5, 0xe0, 0x8c, 0xfa, 0xe0, 0x0f, 0xdc, 0x67, 0xda, 0x3f, 0x5f, 0x80, 0xb1, 0xeb, 0x6b, + 0x6b, 0xb5, 0x6b, 0x24, 0x16, 0xd7, 0x76, 0x6f, 0x35, 0x3a, 0xd6, 0xb4, 0x81, 0xdd, 0x5e, 0x81, + 0xed, 0xd8, 0x6b, 0xce, 0xf1, 0x40, 0x41, 0x73, 0x55, 0x3f, 0xbe, 0x15, 0xd6, 0xe3, 0xd0, 0xf3, + 0x37, 0x33, 0xf5, 0x87, 0x92, 0xb9, 0x28, 0xe6, 0x31, 0x17, 0xe8, 0x79, 0x18, 0x62, 0x91, 0x8a, + 0xe4, 0x24, 0x3c, 0xaa, 0x1e, 0x51, 0xac, 0xf4, 0xf0, 0x60, 0xb6, 0x7c, 0x1b, 0x57, 0xf9, 0x1f, + 0x2c, 0x50, 0xd1, 0x6d, 0x18, 0xd9, 0x8a, 0xe3, 0xd6, 0x75, 0xe2, 0xb8, 0xf4, 0xb5, 0xcc, 0x8f, + 0xc3, 0x8b, 0x59, 0xc7, 0x21, 0x1d, 0x04, 0x8e, 0x96, 0x9c, 0x20, 0x49, 0x59, 0x84, 0x75, 0x3a, + 0x76, 0x1d, 0x20, 0x81, 0x1d, 0x93, 0xee, 0xc4, 0xfe, 0x7d, 0x0b, 0x86, 0x79, 0xd0, 0x88, 0x10, + 0xbd, 0x06, 0x03, 0xe4, 0x1e, 0x69, 0x08, 0x56, 0x39, 0xb3, 0xc3, 0x09, 0xa7, 0xc5, 0x65, 0xc0, + 0xf4, 0x3f, 0x66, 0xb5, 0xd0, 0x75, 0x18, 0xa6, 0xbd, 0xbd, 0xa6, 0x22, 0x68, 0x3c, 0x96, 0xf7, + 0xc5, 0x6a, 0xda, 0x39, 0x73, 0x26, 0x8a, 0xb0, 0xac, 0xce, 0xb4, 0xcf, 0x8d, 0x56, 0x9d, 0x9e, + 0xd8, 0x71, 0x37, 0xc6, 0x62, 0x6d, 0xb1, 0xc6, 0x91, 0x04, 0x35, 0xae, 0x7d, 0x96, 0x85, 0x38, + 0x21, 0x62, 0xaf, 0x41, 0x99, 0x4e, 0xea, 0x7c, 0xd3, 0x73, 0xba, 0x2b, 0xd4, 0x9f, 0x82, 0xb2, + 0x54, 0x97, 0x47, 0xc2, 0x59, 0x9c, 0x51, 0x95, 0xda, 0xf4, 
0x08, 0x27, 0x70, 0x7b, 0x03, 0x4e, + 0x33, 0xe3, 0x47, 0x27, 0xde, 0x32, 0xf6, 0x58, 0xef, 0xc5, 0xfc, 0xb4, 0x78, 0x79, 0xf2, 0x99, + 0x99, 0xd6, 0xfc, 0x31, 0x47, 0x25, 0xc5, 0xe4, 0x15, 0x6a, 0x7f, 0x6d, 0x00, 0x1e, 0xad, 0xd6, + 0xf3, 0xe3, 0x89, 0xbc, 0x0c, 0xa3, 0x9c, 0x2f, 0xa5, 0x4b, 0xdb, 0x69, 0x8a, 0x76, 0x95, 0xf0, + 0x77, 0x4d, 0x83, 0x61, 0x03, 0x13, 0x5d, 0x80, 0xa2, 0xf7, 0xae, 0x9f, 0x76, 0x6d, 0xaa, 0xbe, + 0xb9, 0x8a, 0x69, 0x39, 0x05, 0x53, 0x16, 0x97, 0xdf, 0x1d, 0x0a, 0xac, 0xd8, 0xdc, 0xd7, 0x61, + 0xdc, 0x8b, 0x1a, 0x91, 0x57, 0xf5, 0xe9, 0x39, 0xa3, 0x9d, 0x54, 0x4a, 0x2a, 0x42, 0x3b, 0xad, + 0xa0, 0x38, 0x85, 0xad, 0x5d, 0x64, 0x83, 0x7d, 0xb3, 0xc9, 0x3d, 0xbd, 0xa7, 0xe9, 0x0b, 0xa0, + 0xc5, 0xbe, 0x2e, 0x62, 0x52, 0x7c, 0xf1, 0x02, 0xe0, 0x1f, 0x1c, 0x61, 0x09, 0xa3, 0x4f, 0xce, + 0xc6, 0x96, 0xd3, 0x9a, 0x6f, 0xc7, 0x5b, 0x15, 0x2f, 0x6a, 0x04, 0xbb, 0x24, 0xdc, 0x67, 0xd2, + 0x82, 0x52, 0xf2, 0xe4, 0x54, 0x80, 0xc5, 0xeb, 0xf3, 0x35, 0x8a, 0x89, 0x3b, 0xeb, 0xa0, 0x79, + 0x98, 0x90, 0x85, 0x75, 0x12, 0xb1, 0x2b, 0x6c, 0x84, 0x91, 0x51, 0xce, 0x46, 0xa2, 0x58, 0x11, + 0x49, 0xe3, 0x9b, 0x9c, 0x34, 0x1c, 0x07, 0x27, 0xfd, 0x12, 0x8c, 0x79, 0xbe, 0x17, 0x7b, 0x4e, + 0x1c, 0x70, 0x15, 0x14, 0x17, 0x0c, 0x30, 0xd9, 0x7a, 0x55, 0x07, 0x60, 0x13, 0xcf, 0xfe, 0x2f, + 0x03, 0x30, 0xc5, 0xa6, 0xed, 0xc3, 0x15, 0xf6, 0x8d, 0xb4, 0xc2, 0x6e, 0x77, 0xae, 0xb0, 0xe3, + 0x78, 0x22, 0x3c, 0xf0, 0x32, 0x7b, 0x07, 0xca, 0xca, 0xbf, 0x4a, 0x3a, 0x58, 0x5a, 0x39, 0x0e, + 0x96, 0xbd, 0xb9, 0x0f, 0x69, 0xa2, 0x56, 0xcc, 0x34, 0x51, 0xfb, 0xeb, 0x16, 0x24, 0x3a, 0x15, + 0x74, 0x1d, 0xca, 0xad, 0x80, 0x59, 0x5e, 0x86, 0xd2, 0x9c, 0xf9, 0xd1, 0xcc, 0x8b, 0x8a, 0x5f, + 0x8a, 0xfc, 0xe3, 0x6b, 0xb2, 0x06, 0x4e, 0x2a, 0xa3, 0x05, 0x18, 0x6e, 0x85, 0xa4, 0x1e, 0xb3, + 0xb0, 0x22, 0x3d, 0xe9, 0xf0, 0x35, 0xc2, 0xf1, 0xb1, 0xac, 0x68, 0xff, 0x82, 0x05, 0xc0, 0xad, + 0xc0, 0x1c, 0x7f, 0x93, 0x9c, 0x80, 0xb8, 0xbb, 0x02, 0x03, 0x51, 0x8b, 0x34, 0xba, 0xd9, 0xc4, + 0x26, 0xfd, 0xa9, 0xb7, 0x48, 0x23, 0x19, 0x70, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x77, 0x03, 0x8c, + 0x27, 0x68, 0xd5, 0x98, 0xec, 0xa0, 0x67, 0x8c, 0x30, 0x03, 0xe7, 0x52, 0x61, 0x06, 0xca, 0x0c, + 0x5b, 0x93, 0xac, 0xbe, 0x03, 0xc5, 0x1d, 0xe7, 0x9e, 0x10, 0x9d, 0x3d, 0xd5, 0xbd, 0x1b, 0x94, + 0xfe, 0xdc, 0x8a, 0x73, 0x8f, 0x3f, 0x12, 0x9f, 0x92, 0x0b, 0x64, 0xc5, 0xb9, 0x77, 0xc8, 0x2d, + 0x5f, 0xd9, 0x21, 0x75, 0xd3, 0x8b, 0xe2, 0x2f, 0xfc, 0xe7, 0xe4, 0x3f, 0x5b, 0x76, 0xb4, 0x11, + 0xd6, 0x96, 0xe7, 0x0b, 0x9b, 0xa8, 0xbe, 0xda, 0xf2, 0xfc, 0x74, 0x5b, 0x9e, 0xdf, 0x47, 0x5b, + 0x9e, 0x8f, 0xde, 0x83, 0x61, 0x61, 0x7f, 0x28, 0xc2, 0xfa, 0x5c, 0xed, 0xa3, 0x3d, 0x61, 0xbe, + 0xc8, 0xdb, 0xbc, 0x2a, 0x1f, 0xc1, 0xa2, 0xb4, 0x67, 0xbb, 0xb2, 0x41, 0xf4, 0x57, 0x2c, 0x18, + 0x17, 0xbf, 0x31, 0x79, 0xb7, 0x4d, 0xa2, 0x58, 0xf0, 0x9e, 0x9f, 0xec, 0xbf, 0x0f, 0xa2, 0x22, + 0xef, 0xca, 0x27, 0xe5, 0x31, 0x6b, 0x02, 0x7b, 0xf6, 0x28, 0xd5, 0x0b, 0xf4, 0xf7, 0x2c, 0x38, + 0xbd, 0xe3, 0xdc, 0xe3, 0x2d, 0xf2, 0x32, 0xec, 0xc4, 0x5e, 0x20, 0x54, 0xff, 0xaf, 0xf5, 0x37, + 0xfd, 0x1d, 0xd5, 0x79, 0x27, 0xa5, 0x7e, 0xf2, 0x74, 0x16, 0x4a, 0xcf, 0xae, 0x66, 0xf6, 0x6b, + 0x66, 0x03, 0x4a, 0x72, 0xbd, 0x65, 0x88, 0x1a, 0x2a, 0x3a, 0x63, 0x7d, 0x64, 0xf3, 0x4f, 0xdd, + 0xd7, 0x9f, 0xb6, 0x23, 0xd6, 0xda, 0x43, 0x6d, 0xe7, 0x1d, 0x18, 0xd5, 0xd7, 0xd8, 0x43, 0x6d, + 0xeb, 0x5d, 0x38, 0x95, 0xb1, 0x96, 0x1e, 0x6a, 0x93, 0x7b, 0x70, 0x2e, 0x77, 0x7d, 0x3c, 0xcc, + 0x86, 0xed, 0x9f, 0xb7, 0xf4, 0x73, 0xf0, 0x04, 0x74, 0x0e, 0x8b, 0xa6, 0xce, 0xe1, 
0x62, 0xf7, + 0x9d, 0x93, 0xa3, 0x78, 0x78, 0x5b, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 0x1b, 0x30, 0xd4, 0xa4, 0x25, + 0xd2, 0xf0, 0xd5, 0xee, 0xbd, 0x23, 0x13, 0x5e, 0x8a, 0x95, 0x47, 0x58, 0x50, 0xb0, 0x7f, 0xd9, + 0x82, 0x81, 0x13, 0x18, 0x09, 0x6c, 0x8e, 0xc4, 0x33, 0xb9, 0xa4, 0x45, 0xc4, 0xe1, 0x39, 0xec, + 0xec, 0x2d, 0xdd, 0x8b, 0x89, 0x1f, 0xb1, 0xa7, 0x62, 0xe6, 0xc0, 0xfc, 0x5f, 0x70, 0xea, 0x66, + 0xe0, 0xb8, 0x0b, 0x4e, 0xd3, 0xf1, 0x1b, 0x24, 0xac, 0xfa, 0x9b, 0x47, 0xb2, 0xc0, 0x2e, 0xf4, + 0xb2, 0xc0, 0xb6, 0xb7, 0x00, 0xe9, 0x0d, 0x08, 0x57, 0x16, 0x0c, 0xc3, 0x1e, 0x6f, 0x4a, 0x0c, + 0xff, 0x13, 0xd9, 0xac, 0x59, 0x47, 0xcf, 0x34, 0x27, 0x0d, 0x5e, 0x80, 0x25, 0x21, 0xfb, 0x65, + 0xc8, 0xf4, 0x87, 0xef, 0x2d, 0x36, 0xb0, 0x3f, 0x03, 0x53, 0xac, 0xe6, 0x11, 0x9f, 0xb4, 0x76, + 0x4a, 0x2a, 0x99, 0x11, 0xfc, 0xce, 0xfe, 0xa2, 0x05, 0x13, 0xab, 0xa9, 0x98, 0x60, 0x97, 0x99, + 0x02, 0x34, 0x43, 0x18, 0x5e, 0x67, 0xa5, 0x58, 0x40, 0x8f, 0x5d, 0x06, 0xf5, 0xe7, 0x16, 0x24, + 0x21, 0x2a, 0x4e, 0x80, 0xf1, 0x5a, 0x34, 0x18, 0xaf, 0x4c, 0xd9, 0x88, 0xea, 0x4e, 0x1e, 0xdf, + 0x85, 0x6e, 0xa8, 0x78, 0x4c, 0x5d, 0xc4, 0x22, 0x09, 0x19, 0x1e, 0xbd, 0x67, 0xdc, 0x0c, 0xda, + 0x24, 0x23, 0x34, 0xd9, 0xff, 0xb1, 0x00, 0x48, 0xe1, 0xf6, 0x1d, 0x2f, 0xaa, 0xb3, 0xc6, 0xf1, + 0xc4, 0x8b, 0xda, 0x05, 0xc4, 0x54, 0xf8, 0xa1, 0xe3, 0x47, 0x9c, 0xac, 0x27, 0xa4, 0x6e, 0x47, + 0xb3, 0x0f, 0x98, 0x11, 0x4d, 0xa2, 0x9b, 0x1d, 0xd4, 0x70, 0x46, 0x0b, 0x9a, 0x69, 0xc6, 0x60, + 0xbf, 0xa6, 0x19, 0x43, 0x3d, 0xdc, 0xd5, 0x7e, 0xce, 0x82, 0x31, 0x35, 0x4c, 0x1f, 0x10, 0xfb, + 0x73, 0xd5, 0x9f, 0x9c, 0xa3, 0xaf, 0xa6, 0x75, 0x99, 0x5d, 0x09, 0xdf, 0xcc, 0xdc, 0x0e, 0x9d, + 0xa6, 0xf7, 0x1e, 0x51, 0xd1, 0xfa, 0x66, 0x85, 0x1b, 0xa1, 0x28, 0x3d, 0x3c, 0x98, 0x1d, 0x53, + 0xff, 0x78, 0x74, 0xe0, 0xa4, 0x8a, 0xfd, 0x13, 0x74, 0xb3, 0x9b, 0x4b, 0x11, 0xbd, 0x08, 0x83, + 0xad, 0x2d, 0x27, 0x22, 0x29, 0xa7, 0x9b, 0xc1, 0x1a, 0x2d, 0x3c, 0x3c, 0x98, 0x1d, 0x57, 0x15, + 0x58, 0x09, 0xe6, 0xd8, 0xfd, 0x47, 0xe1, 0xea, 0x5c, 0x9c, 0x3d, 0xa3, 0x70, 0xfd, 0xb1, 0x05, + 0x03, 0xab, 0x81, 0x7b, 0x12, 0x47, 0xc0, 0xeb, 0xc6, 0x11, 0x70, 0x3e, 0x2f, 0x70, 0x7b, 0xee, + 0xee, 0x5f, 0x4e, 0xed, 0xfe, 0x8b, 0xb9, 0x14, 0xba, 0x6f, 0xfc, 0x1d, 0x18, 0x61, 0xe1, 0xe0, + 0x85, 0x83, 0xd1, 0xf3, 0xc6, 0x86, 0x9f, 0x4d, 0x6d, 0xf8, 0x09, 0x0d, 0x55, 0xdb, 0xe9, 0x4f, + 0xc2, 0xb0, 0x70, 0x72, 0x49, 0x7b, 0x6f, 0x0a, 0x5c, 0x2c, 0xe1, 0xf6, 0x8f, 0x15, 0xc1, 0x08, + 0x3f, 0x8f, 0x7e, 0xc5, 0x82, 0xb9, 0x90, 0x1b, 0xbf, 0xba, 0x95, 0x76, 0xe8, 0xf9, 0x9b, 0xf5, + 0xc6, 0x16, 0x71, 0xdb, 0x4d, 0xcf, 0xdf, 0xac, 0x6e, 0xfa, 0x81, 0x2a, 0x5e, 0xba, 0x47, 0x1a, + 0x6d, 0xa6, 0xbe, 0xea, 0x11, 0xeb, 0x5e, 0x19, 0x91, 0x3f, 0x77, 0xff, 0x60, 0x76, 0x0e, 0x1f, + 0x89, 0x36, 0x3e, 0x62, 0x5f, 0xd0, 0x6f, 0x5a, 0x70, 0x95, 0x47, 0x65, 0xef, 0xbf, 0xff, 0x5d, + 0xde, 0xb9, 0x35, 0x49, 0x2a, 0x21, 0xb2, 0x46, 0xc2, 0x9d, 0x85, 0x97, 0xc4, 0x80, 0x5e, 0xad, + 0x1d, 0xad, 0x2d, 0x7c, 0xd4, 0xce, 0xd9, 0xff, 0xac, 0x08, 0x63, 0x22, 0xb4, 0x93, 0xb8, 0x03, + 0x5e, 0x34, 0x96, 0xc4, 0x63, 0xa9, 0x25, 0x31, 0x65, 0x20, 0x1f, 0xcf, 0xf1, 0x1f, 0xc1, 0x14, + 0x3d, 0x9c, 0xaf, 0x13, 0x27, 0x8c, 0xd7, 0x89, 0xc3, 0x2d, 0xae, 0x8a, 0x47, 0x3e, 0xfd, 0x95, + 0x60, 0xed, 0x66, 0x9a, 0x18, 0xee, 0xa4, 0xff, 0x8d, 0x74, 0xe7, 0xf8, 0x30, 0xd9, 0x11, 0x9d, + 0xeb, 0x2d, 0x28, 0x2b, 0x0f, 0x0d, 0x71, 0xe8, 0x74, 0x0f, 0x72, 0x97, 0xa6, 0xc0, 0x85, 0x5f, + 0x89, 0x77, 0x50, 0x42, 0xce, 0xfe, 0xfb, 0x05, 0xa3, 0x41, 0x3e, 0x89, 0xab, 0x50, 0x72, 0xa2, + 0xc8, 0xdb, 
0xf4, 0x89, 0x2b, 0x76, 0xec, 0x47, 0xf3, 0x76, 0xac, 0xd1, 0x0c, 0xf3, 0x92, 0x99, + 0x17, 0x35, 0xb1, 0xa2, 0x81, 0xae, 0x73, 0xbb, 0xb6, 0x5d, 0xf9, 0x52, 0xeb, 0x8f, 0x1a, 0x48, + 0xcb, 0xb7, 0x5d, 0x82, 0x45, 0x7d, 0xf4, 0x59, 0x6e, 0x78, 0x78, 0xc3, 0x0f, 0xf6, 0xfc, 0x6b, + 0x41, 0x20, 0xc3, 0x27, 0xf4, 0x47, 0x70, 0x4a, 0x9a, 0x1b, 0xaa, 0xea, 0xd8, 0xa4, 0xd6, 0x5f, + 0x04, 0xcb, 0x6f, 0x83, 0x53, 0x94, 0xb4, 0xe9, 0xdd, 0x1c, 0x21, 0x02, 0x13, 0x22, 0x6e, 0x98, + 0x2c, 0x13, 0x63, 0x97, 0xf9, 0x08, 0x33, 0x6b, 0x27, 0x12, 0xe0, 0x1b, 0x26, 0x09, 0x9c, 0xa6, + 0x69, 0xff, 0xa4, 0x05, 0xcc, 0xd3, 0xf3, 0x04, 0xf8, 0x91, 0x4f, 0x99, 0xfc, 0xc8, 0x74, 0xde, + 0x20, 0xe7, 0xb0, 0x22, 0x2f, 0xf0, 0x95, 0x55, 0x0b, 0x83, 0x7b, 0xfb, 0xc2, 0xe8, 0xa3, 0xf7, + 0xfb, 0xc3, 0xfe, 0x5f, 0x16, 0x3f, 0xc4, 0x94, 0xff, 0x04, 0xfa, 0x76, 0x28, 0x35, 0x9c, 0x96, + 0xd3, 0xe0, 0xb9, 0x52, 0x72, 0x65, 0x71, 0x46, 0xa5, 0xb9, 0x45, 0x51, 0x83, 0xcb, 0x96, 0x64, + 0xfc, 0xb9, 0x92, 0x2c, 0xee, 0x29, 0x4f, 0x52, 0x4d, 0xce, 0x6c, 0xc3, 0x98, 0x41, 0xec, 0xa1, + 0x0a, 0x22, 0xbe, 0x9d, 0x5f, 0xb1, 0x2a, 0x5e, 0xe2, 0x0e, 0x4c, 0xf9, 0xda, 0x7f, 0x7a, 0xa1, + 0xc8, 0xc7, 0xe5, 0x47, 0x7b, 0x5d, 0xa2, 0xec, 0xf6, 0xd1, 0xfc, 0x4e, 0x53, 0x64, 0x70, 0x27, + 0x65, 0xfb, 0xc7, 0x2d, 0x78, 0x44, 0x47, 0xd4, 0x5c, 0x5b, 0x7a, 0x49, 0xf7, 0x2b, 0x50, 0x0a, + 0x5a, 0x24, 0x74, 0xe2, 0x20, 0x14, 0xb7, 0xc6, 0x15, 0x39, 0xe8, 0xb7, 0x44, 0xf9, 0xa1, 0x88, + 0x34, 0x2e, 0xa9, 0xcb, 0x72, 0xac, 0x6a, 0xd2, 0xd7, 0x27, 0x1b, 0x8c, 0x48, 0x38, 0x31, 0xb1, + 0x33, 0x80, 0x29, 0xba, 0x23, 0x2c, 0x20, 0xf6, 0xd7, 0x2c, 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0x77, + 0x61, 0x72, 0xc7, 0x89, 0x1b, 0x5b, 0x4b, 0xf7, 0x5a, 0x21, 0xd7, 0x95, 0xc8, 0x71, 0x7a, 0xaa, + 0xd7, 0x38, 0x69, 0x1f, 0x99, 0xd8, 0x52, 0xae, 0xa4, 0x88, 0xe1, 0x0e, 0xf2, 0x68, 0x1d, 0x46, + 0x58, 0x19, 0xf3, 0xcf, 0x8b, 0xba, 0xb1, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81, 0x95, 0x84, 0x0e, + 0xd6, 0x89, 0xda, 0x3f, 0x53, 0xe4, 0xbb, 0x9d, 0xb1, 0xf2, 0x4f, 0xc2, 0x70, 0x2b, 0x70, 0x17, + 0xab, 0x15, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x81, 0x92, 0xf8, + 0x29, 0x75, 0x5b, 0xec, 0x6c, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x1c, 0x40, 0x2b, 0x0c, 0x76, + 0x3d, 0x97, 0x05, 0x81, 0x28, 0x9a, 0x66, 0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16, 0x7a, 0x15, 0xc6, + 0xda, 0x7e, 0xc4, 0xd9, 0x11, 0x67, 0x5d, 0x04, 0xe5, 0x2e, 0x25, 0x06, 0x28, 0xb7, 0x75, 0x20, + 0x36, 0x71, 0xd1, 0x3c, 0x0c, 0xc5, 0x0e, 0x33, 0x5b, 0x19, 0xcc, 0xb7, 0xb7, 0x5d, 0xa3, 0x18, + 0x7a, 0x5a, 0x0e, 0x5a, 0x01, 0x8b, 0x8a, 0xe8, 0x2d, 0xe9, 0x2a, 0xcb, 0x0f, 0x76, 0x61, 0xe8, + 0xde, 0xdf, 0x25, 0xa0, 0x39, 0xca, 0x0a, 0x03, 0x7a, 0x83, 0x16, 0x7a, 0x05, 0x80, 0xdc, 0x8b, + 0x49, 0xe8, 0x3b, 0x4d, 0x65, 0x15, 0xa6, 0xf8, 0x82, 0x4a, 0xb0, 0x1a, 0xc4, 0xb7, 0x23, 0xb2, + 0xa4, 0x30, 0xb0, 0x86, 0x6d, 0xff, 0x66, 0x19, 0x20, 0xe1, 0xdb, 0xd1, 0x7b, 0x1d, 0x07, 0xd7, + 0xd3, 0xdd, 0x39, 0xfd, 0xe3, 0x3b, 0xb5, 0xd0, 0xf7, 0x58, 0x30, 0xe2, 0x34, 0x9b, 0x41, 0xc3, + 0x89, 0xd9, 0x0c, 0x15, 0xba, 0x1f, 0x9c, 0xa2, 0xfd, 0xf9, 0xa4, 0x06, 0xef, 0xc2, 0xf3, 0x72, + 0x85, 0x6a, 0x90, 0x9e, 0xbd, 0xd0, 0x1b, 0x46, 0x9f, 0x90, 0x4f, 0xc5, 0xa2, 0x31, 0x94, 0xea, + 0xa9, 0x58, 0x66, 0x77, 0x84, 0xfe, 0x4a, 0xbc, 0x6d, 0xbc, 0x12, 0x07, 0xf2, 0x7d, 0x01, 0x0d, + 0xf6, 0xb5, 0xd7, 0x03, 0x11, 0xd5, 0xf4, 0xb8, 0x00, 0x83, 0xf9, 0x8e, 0x77, 0xda, 0x3b, 0xa9, + 0x47, 0x4c, 0x80, 0x77, 0x60, 0xc2, 0x35, 0x99, 0x00, 0xb1, 0x12, 0x9f, 0xc8, 0xa3, 0x9b, 0xe2, + 0x19, 0x92, 0x6b, 0x3f, 0x05, 0xc0, 
0x69, 0xc2, 0xa8, 0xc6, 0x63, 0x3e, 0x54, 0xfd, 0x8d, 0x40, + 0x38, 0x5b, 0xd8, 0xb9, 0x73, 0xb9, 0x1f, 0xc5, 0x64, 0x87, 0x62, 0x26, 0xb7, 0xfb, 0xaa, 0xa8, + 0x8b, 0x15, 0x15, 0xf4, 0x06, 0x0c, 0x31, 0xcf, 0xab, 0x68, 0xba, 0x94, 0x2f, 0x2b, 0x36, 0x83, + 0x98, 0x25, 0x1b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x4b, 0xbf, 0xc6, 0xa8, 0xea, 0xdf, + 0x8e, 0x08, 0xf3, 0x6b, 0x2c, 0x2f, 0x7c, 0x34, 0x71, 0x59, 0xe4, 0xe5, 0x99, 0xc9, 0xbb, 0x8c, + 0x9a, 0x94, 0x8b, 0x12, 0xff, 0x65, 0x4e, 0xb0, 0x69, 0xc8, 0xef, 0x9e, 0x99, 0x37, 0x2c, 0x19, + 0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x39, 0x52, 0xbe, 0xeb, 0x85, 0xbb, 0x46, 0xaf, 0xb3, + 0x83, 0x3f, 0xc4, 0xd9, 0x6d, 0xc4, 0x4b, 0xb0, 0xa8, 0x7f, 0xa2, 0xec, 0xc1, 0x8c, 0x0f, 0x93, + 0xe9, 0x2d, 0xfa, 0x50, 0xd9, 0x91, 0xdf, 0x1f, 0x80, 0x71, 0x73, 0x49, 0xa1, 0xab, 0x50, 0x16, + 0x44, 0x54, 0x1c, 0x7f, 0xb5, 0x4b, 0x56, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x7d, 0x03, 0xab, 0xae, + 0x99, 0xd9, 0x26, 0xe9, 0x1b, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x61, 0xb5, 0x1e, 0x04, 0xb1, 0xba, + 0x90, 0xd4, 0xba, 0x5b, 0x60, 0xa5, 0x58, 0x40, 0xe9, 0x45, 0xb4, 0x4d, 0x42, 0x9f, 0x34, 0xcd, + 0xf0, 0xc0, 0xea, 0x22, 0xba, 0xa1, 0x03, 0xb1, 0x89, 0x4b, 0xaf, 0xd3, 0x20, 0x62, 0x0b, 0x59, + 0x3c, 0xdf, 0x12, 0xb3, 0xe5, 0x3a, 0x77, 0xad, 0x96, 0x70, 0xf4, 0x19, 0x78, 0x44, 0x85, 0x40, + 0xc2, 0x5c, 0x0f, 0x21, 0x5b, 0x1c, 0x32, 0xa4, 0x2d, 0x8f, 0x2c, 0x66, 0xa3, 0xe1, 0xbc, 0xfa, + 0xe8, 0x75, 0x18, 0x17, 0x2c, 0xbe, 0xa4, 0x38, 0x6c, 0x9a, 0xc6, 0xdc, 0x30, 0xa0, 0x38, 0x85, + 0x2d, 0x03, 0x1c, 0x33, 0x2e, 0x5b, 0x52, 0x28, 0x75, 0x06, 0x38, 0xd6, 0xe1, 0xb8, 0xa3, 0x06, + 0x9a, 0x87, 0x09, 0xce, 0x83, 0x79, 0xfe, 0x26, 0x9f, 0x13, 0xe1, 0x4d, 0xa5, 0xb6, 0xd4, 0x2d, + 0x13, 0x8c, 0xd3, 0xf8, 0xe8, 0x65, 0x18, 0x75, 0xc2, 0xc6, 0x96, 0x17, 0x93, 0x46, 0xdc, 0x0e, + 0xb9, 0x9b, 0x95, 0x66, 0x5b, 0x34, 0xaf, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x07, 0xa7, 0x32, 0x62, + 0x2e, 0xd0, 0x85, 0xe3, 0xb4, 0x3c, 0xf9, 0x4d, 0x29, 0x03, 0xe4, 0xf9, 0x5a, 0x55, 0x7e, 0x8d, + 0x86, 0x45, 0x57, 0x27, 0x8b, 0xcd, 0xa0, 0xa5, 0x00, 0x54, 0xab, 0x73, 0x59, 0x02, 0x70, 0x82, + 0x63, 0xff, 0xf7, 0x02, 0x4c, 0x64, 0xe8, 0x56, 0x58, 0x1a, 0xba, 0xd4, 0x23, 0x25, 0xc9, 0x3a, + 0x67, 0xc6, 0xcb, 0x2e, 0x1c, 0x21, 0x5e, 0x76, 0xb1, 0x57, 0xbc, 0xec, 0x81, 0xf7, 0x13, 0x2f, + 0xdb, 0x1c, 0xb1, 0xc1, 0xbe, 0x46, 0x2c, 0x23, 0xc6, 0xf6, 0xd0, 0x11, 0x63, 0x6c, 0x1b, 0x83, + 0x3e, 0xdc, 0xc7, 0xa0, 0xff, 0x50, 0x01, 0x26, 0xd3, 0x36, 0x90, 0x27, 0x20, 0xb7, 0x7d, 0xc3, + 0x90, 0xdb, 0x66, 0x27, 0x75, 0x4c, 0x5b, 0x66, 0xe6, 0xc9, 0x70, 0x71, 0x4a, 0x86, 0xfb, 0xf1, + 0xbe, 0xa8, 0x75, 0x97, 0xe7, 0xfe, 0xad, 0x02, 0x9c, 0x49, 0x57, 0x59, 0x6c, 0x3a, 0xde, 0xce, + 0x09, 0x8c, 0xcd, 0x2d, 0x63, 0x6c, 0x9e, 0xe9, 0xe7, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e, + 0x6a, 0x80, 0xae, 0xf6, 0x4f, 0xb2, 0xfb, 0x28, 0x7d, 0xb5, 0x08, 0x17, 0x33, 0xeb, 0x25, 0x62, + 0xcf, 0x65, 0x43, 0xec, 0xf9, 0x5c, 0x4a, 0xec, 0x69, 0x77, 0xaf, 0x7d, 0x3c, 0x72, 0x50, 0xe1, + 0x21, 0xcb, 0x02, 0x08, 0x3c, 0xa0, 0x0c, 0xd4, 0xf0, 0x90, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0x1b, + 0x49, 0xf6, 0xf9, 0xaf, 0x2c, 0x38, 0x97, 0x39, 0x37, 0x27, 0x20, 0xeb, 0x5a, 0x35, 0x65, 0x5d, + 0x4f, 0xf6, 0xbd, 0x5a, 0x73, 0x84, 0x5f, 0xbf, 0x3e, 0x90, 0xf3, 0x2d, 0xec, 0x25, 0x7f, 0x0b, + 0x46, 0x9c, 0x46, 0x83, 0x44, 0xd1, 0x4a, 0xe0, 0xaa, 0x90, 0xc0, 0xcf, 0xb0, 0x77, 0x56, 0x52, + 0x7c, 0x78, 0x30, 0x3b, 0x93, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0x01, 0x7d, 0x16, 0x4a, 0x91, 0xb8, + 0x37, 0xc5, 0xdc, 0x3f, 0xdf, 0xe7, 0xe0, 0x38, 0xeb, 0xa4, 
0x69, 0x86, 0x39, 0x52, 0x92, 0x0a, + 0x45, 0xd2, 0x0c, 0x89, 0x52, 0x38, 0xd6, 0x90, 0x28, 0xcf, 0x01, 0xec, 0xaa, 0xc7, 0x40, 0x5a, + 0xfe, 0xa0, 0x3d, 0x13, 0x34, 0x2c, 0xf4, 0x2d, 0x30, 0x19, 0xf1, 0xa0, 0x7e, 0x8b, 0x4d, 0x27, + 0x62, 0x6e, 0x2e, 0x62, 0x15, 0xb2, 0x50, 0x4a, 0xf5, 0x14, 0x0c, 0x77, 0x60, 0xa3, 0x65, 0xd9, + 0x2a, 0x8b, 0x40, 0xc8, 0x17, 0xe6, 0xe5, 0xa4, 0x45, 0x91, 0x04, 0xf7, 0x74, 0x7a, 0xf8, 0xd9, + 0xc0, 0x6b, 0x35, 0xd1, 0x67, 0x01, 0xe8, 0xf2, 0x11, 0x72, 0x88, 0xe1, 0xfc, 0xc3, 0x93, 0x9e, + 0x2a, 0x6e, 0xa6, 0x55, 0x2e, 0xf3, 0x4d, 0xad, 0x28, 0x22, 0x58, 0x23, 0x68, 0xff, 0xd0, 0x00, + 0x3c, 0xda, 0xe5, 0x8c, 0x44, 0xf3, 0xa6, 0x1e, 0xf6, 0xa9, 0xf4, 0xe3, 0x7a, 0x26, 0xb3, 0xb2, + 0xf1, 0xda, 0x4e, 0x2d, 0xc5, 0xc2, 0xfb, 0x5e, 0x8a, 0xdf, 0x6f, 0x69, 0x62, 0x0f, 0x6e, 0xab, + 0xf9, 0xa9, 0x23, 0x9e, 0xfd, 0xc7, 0x28, 0x07, 0xd9, 0xc8, 0x10, 0x26, 0x3c, 0xd7, 0x77, 0x77, + 0xfa, 0x96, 0x2e, 0x9c, 0xac, 0x94, 0xf8, 0x0b, 0x16, 0x3c, 0x96, 0xd9, 0x5f, 0xc3, 0x22, 0xe7, + 0x2a, 0x94, 0x1b, 0xb4, 0x50, 0x73, 0x45, 0x4c, 0x7c, 0xb4, 0x25, 0x00, 0x27, 0x38, 0x86, 0xe1, + 0x4d, 0xa1, 0xa7, 0xe1, 0xcd, 0x3f, 0xb5, 0xa0, 0x63, 0x7f, 0x9c, 0xc0, 0x41, 0x5d, 0x35, 0x0f, + 0xea, 0x8f, 0xf6, 0x33, 0x97, 0x39, 0x67, 0xf4, 0x1f, 0x4d, 0xc0, 0xd9, 0x1c, 0x57, 0x9c, 0x5d, + 0x98, 0xda, 0x6c, 0x10, 0xd3, 0xc9, 0x53, 0x7c, 0x4c, 0xa6, 0x3f, 0x6c, 0x57, 0x8f, 0x50, 0x96, + 0xd1, 0x72, 0xaa, 0x03, 0x05, 0x77, 0x36, 0x81, 0xbe, 0x60, 0xc1, 0x69, 0x67, 0x2f, 0xea, 0x48, + 0x81, 0x2f, 0xd6, 0xcc, 0x0b, 0x99, 0x42, 0x90, 0x1e, 0x29, 0xf3, 0x79, 0x8a, 0xcf, 0x2c, 0x2c, + 0x9c, 0xd9, 0x16, 0xc2, 0x22, 0x48, 0x3c, 0x65, 0xe7, 0xbb, 0xb8, 0x21, 0x67, 0xf9, 0x4c, 0xf1, + 0x1b, 0x44, 0x42, 0xb0, 0xa2, 0x83, 0x3e, 0x0f, 0xe5, 0x4d, 0xe9, 0xc8, 0x98, 0x71, 0x43, 0x25, + 0x03, 0xd9, 0xdd, 0xbd, 0x93, 0x6b, 0x32, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0xd7, 0xa1, 0xe8, 0x6f, + 0x44, 0xdd, 0xb2, 0x64, 0xa6, 0x4c, 0xd6, 0xb8, 0xb3, 0xff, 0xea, 0x72, 0x1d, 0xd3, 0x8a, 0xe8, + 0x3a, 0x14, 0xc3, 0x75, 0x57, 0x48, 0xf0, 0x32, 0xcf, 0x70, 0xbc, 0x50, 0xc9, 0xe9, 0x15, 0xa3, + 0x84, 0x17, 0x2a, 0x98, 0x92, 0x40, 0x35, 0x18, 0x64, 0xfe, 0x2b, 0xe2, 0x3e, 0xc8, 0xe4, 0x7c, + 0xbb, 0xf8, 0x81, 0xf1, 0x88, 0x00, 0x0c, 0x01, 0x73, 0x42, 0x68, 0x0d, 0x86, 0x1a, 0x2c, 0xa3, + 0xa2, 0x88, 0x47, 0xf6, 0x89, 0x4c, 0x59, 0x5d, 0x97, 0x54, 0x93, 0x42, 0x74, 0xc5, 0x30, 0xb0, + 0xa0, 0xc5, 0xa8, 0x92, 0xd6, 0xd6, 0x46, 0x24, 0x32, 0x00, 0x67, 0x53, 0xed, 0x92, 0x41, 0x55, + 0x50, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xa3, 0x21, 0x7c, 0x53, 0x32, 0x85, 0x76, + 0x66, 0xbc, 0x86, 0x85, 0xa1, 0xfb, 0x07, 0xb3, 0x85, 0xe5, 0x45, 0x5c, 0xd8, 0x68, 0xa0, 0x55, + 0x18, 0xde, 0xe0, 0x1e, 0xde, 0x42, 0x2e, 0xf7, 0x44, 0xb6, 0xf3, 0x79, 0x87, 0x13, 0x38, 0x77, + 0xcb, 0x10, 0x00, 0x2c, 0x89, 0xb0, 0x98, 0xeb, 0xca, 0x53, 0x5d, 0x84, 0xee, 0x9a, 0x3b, 0x5a, + 0x74, 0x01, 0x7e, 0x3f, 0x27, 0xfe, 0xee, 0x58, 0xa3, 0x48, 0x57, 0xb5, 0x23, 0xd3, 0xb0, 0x8b, + 0x50, 0x2c, 0x99, 0xab, 0xba, 0x47, 0x86, 0x7a, 0xbe, 0xaa, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x6d, + 0x18, 0xdb, 0x8d, 0x5a, 0x5b, 0x44, 0x6e, 0x69, 0x16, 0x99, 0x25, 0xe7, 0x0a, 0xbb, 0x23, 0x10, + 0xbd, 0x30, 0x6e, 0x3b, 0xcd, 0x8e, 0x53, 0x88, 0xa9, 0xbf, 0xef, 0xe8, 0xc4, 0xb0, 0x49, 0x9b, + 0x0e, 0xff, 0xbb, 0xed, 0x60, 0x7d, 0x3f, 0x26, 0x22, 0xe2, 0x56, 0xe6, 0xf0, 0xbf, 0xc9, 0x51, + 0x3a, 0x87, 0x5f, 0x00, 0xb0, 0x24, 0x82, 0xee, 0x88, 0xe1, 0x61, 0xa7, 0xe7, 0x64, 0x7e, 0x58, + 0xcc, 0x79, 0x89, 0x94, 0x33, 0x28, 0xec, 0xb4, 0x4c, 0x48, 0xb1, 0x53, 0xb2, 0xb5, 
0x15, 0xc4, + 0x81, 0x9f, 0x3a, 0xa1, 0xa7, 0xf2, 0x4f, 0xc9, 0x5a, 0x06, 0x7e, 0xe7, 0x29, 0x99, 0x85, 0x85, + 0x33, 0xdb, 0x42, 0x2e, 0x8c, 0xb7, 0x82, 0x30, 0xde, 0x0b, 0x42, 0xb9, 0xbe, 0x50, 0x17, 0xb9, + 0x82, 0x81, 0x29, 0x5a, 0x64, 0xc1, 0xec, 0x4c, 0x08, 0x4e, 0xd1, 0x44, 0x9f, 0x86, 0xe1, 0xa8, + 0xe1, 0x34, 0x49, 0xf5, 0xd6, 0xf4, 0xa9, 0xfc, 0xeb, 0xa7, 0xce, 0x51, 0x72, 0x56, 0x17, 0x0f, + 0xd0, 0xce, 0x51, 0xb0, 0x24, 0x87, 0x96, 0x61, 0x90, 0xe5, 0xd4, 0x62, 0xe1, 0xe1, 0x72, 0xa2, + 0x7b, 0x76, 0x18, 0x10, 0xf3, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0xc1, 0x5e, 0x07, + 0xd1, 0xf4, 0x99, 0xfc, 0x3d, 0x20, 0xb8, 0xf2, 0x5b, 0xf5, 0x6e, 0x7b, 0x40, 0x21, 0xe1, 0x84, + 0x28, 0x3d, 0x99, 0xe9, 0x69, 0x7a, 0xb6, 0x8b, 0xe5, 0x4b, 0xee, 0x59, 0xca, 0x4e, 0x66, 0x7a, + 0x92, 0x52, 0x12, 0xf6, 0xef, 0x0e, 0x77, 0xf2, 0x2c, 0xec, 0x41, 0xf6, 0x5d, 0x56, 0x87, 0xae, + 0xee, 0x93, 0xfd, 0xca, 0x87, 0x8e, 0x91, 0x5b, 0xfd, 0x82, 0x05, 0x67, 0x5b, 0x99, 0x1f, 0x22, + 0x18, 0x80, 0xfe, 0xc4, 0x4c, 0xfc, 0xd3, 0x55, 0x28, 0xc1, 0x6c, 0x38, 0xce, 0x69, 0x29, 0xfd, + 0x22, 0x28, 0xbe, 0xef, 0x17, 0xc1, 0x0a, 0x94, 0x18, 0x93, 0xd9, 0x23, 0xc3, 0x70, 0xfa, 0x61, + 0xc4, 0x58, 0x89, 0x45, 0x51, 0x11, 0x2b, 0x12, 0xe8, 0x07, 0x2c, 0xb8, 0x90, 0xee, 0x3a, 0x26, + 0x0c, 0x2c, 0xe2, 0x0f, 0xf2, 0xb7, 0xe0, 0xb2, 0xf8, 0xfe, 0x0b, 0xb5, 0x6e, 0xc8, 0x87, 0xbd, + 0x10, 0x70, 0xf7, 0xc6, 0x50, 0x25, 0xe3, 0x31, 0x3a, 0x64, 0x0a, 0xe0, 0xfb, 0x78, 0x90, 0xbe, + 0x00, 0xa3, 0x3b, 0x41, 0xdb, 0x8f, 0x85, 0xa1, 0x8c, 0x50, 0xda, 0x33, 0x65, 0xf5, 0x8a, 0x56, + 0x8e, 0x0d, 0xac, 0xd4, 0x33, 0xb6, 0xf4, 0xc0, 0xcf, 0xd8, 0xb7, 0x61, 0xd4, 0xd7, 0x2c, 0x3b, + 0x05, 0x3f, 0x70, 0x39, 0x3f, 0x76, 0xa8, 0x6e, 0x07, 0xca, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0xda, + 0xc9, 0xbe, 0x8d, 0x7e, 0xda, 0xca, 0x60, 0xea, 0xf9, 0x6b, 0xf9, 0x35, 0xf3, 0xb5, 0x7c, 0x39, + 0xfd, 0x5a, 0xee, 0x10, 0xbe, 0x1a, 0x0f, 0xe5, 0xfe, 0xf3, 0x9c, 0xf4, 0x1b, 0x26, 0xd0, 0x6e, + 0xc2, 0xa5, 0x5e, 0xd7, 0x12, 0xb3, 0x98, 0x72, 0x95, 0xaa, 0x2d, 0xb1, 0x98, 0x72, 0xab, 0x15, + 0xcc, 0x20, 0xfd, 0xc6, 0x91, 0xb1, 0xff, 0x9b, 0x05, 0xc5, 0x5a, 0xe0, 0x9e, 0x80, 0x30, 0xf9, + 0x53, 0x86, 0x30, 0xf9, 0xd1, 0xec, 0x0b, 0xd1, 0xcd, 0x15, 0x1d, 0x2f, 0xa5, 0x44, 0xc7, 0x17, + 0xf2, 0x08, 0x74, 0x17, 0x14, 0xff, 0x44, 0x11, 0x46, 0x6a, 0x81, 0xab, 0xcc, 0x95, 0x7f, 0xfd, + 0x41, 0xcc, 0x95, 0x73, 0x03, 0xfc, 0x6b, 0x94, 0x99, 0xa1, 0x95, 0xf4, 0xb1, 0xfc, 0x0b, 0x66, + 0xb5, 0x7c, 0x97, 0x78, 0x9b, 0x5b, 0x31, 0x71, 0xd3, 0x9f, 0x73, 0x72, 0x56, 0xcb, 0xff, 0xd5, + 0x82, 0x89, 0x54, 0xeb, 0xa8, 0x09, 0x63, 0x4d, 0x5d, 0x30, 0x29, 0xd6, 0xe9, 0x03, 0xc9, 0x34, + 0x85, 0xd5, 0xa7, 0x56, 0x84, 0x4d, 0xe2, 0x68, 0x0e, 0x40, 0x69, 0xea, 0xa4, 0x04, 0x8c, 0x71, + 0xfd, 0x4a, 0x95, 0x17, 0x61, 0x0d, 0x03, 0xbd, 0x08, 0x23, 0x71, 0xd0, 0x0a, 0x9a, 0xc1, 0xe6, + 0xfe, 0x0d, 0x22, 0x23, 0x17, 0x29, 0x5b, 0xae, 0xb5, 0x04, 0x84, 0x75, 0x3c, 0xfb, 0xa7, 0x8a, + 0xfc, 0x43, 0xfd, 0xd8, 0xfb, 0x70, 0x4d, 0x7e, 0xb0, 0xd7, 0xe4, 0x57, 0x2d, 0x98, 0xa4, 0xad, + 0x33, 0x73, 0x11, 0x79, 0xd9, 0xaa, 0x98, 0xc1, 0x56, 0x97, 0x98, 0xc1, 0x97, 0xe9, 0xd9, 0xe5, + 0x06, 0xed, 0x58, 0x48, 0xd0, 0xb4, 0xc3, 0x89, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x0c, 0x85, + 0x8b, 0x9b, 0x8e, 0x47, 0xc2, 0x10, 0x0b, 0xa8, 0x0c, 0x29, 0x3c, 0x90, 0x1d, 0x52, 0x98, 0xc7, + 0x61, 0x14, 0x86, 0x05, 0x82, 0xed, 0xd1, 0xe2, 0x30, 0x4a, 0x8b, 0x83, 0x04, 0xc7, 0xfe, 0xf9, + 0x22, 0x8c, 0xd6, 0x02, 0x37, 0xd1, 0x95, 0xbd, 0x60, 0xe8, 0xca, 0x2e, 0xa5, 0x74, 0x65, 0x93, + 0x3a, 0xee, 
0x87, 0x9a, 0xb1, 0xaf, 0x97, 0x66, 0xec, 0x9f, 0x58, 0x6c, 0xd6, 0x2a, 0xab, 0x75, + 0x6e, 0x7d, 0x84, 0x9e, 0x85, 0x11, 0x76, 0x20, 0x31, 0x9f, 0x4a, 0xa9, 0x40, 0x62, 0x29, 0x94, + 0x56, 0x93, 0x62, 0xac, 0xe3, 0xa0, 0x2b, 0x50, 0x8a, 0x88, 0x13, 0x36, 0xb6, 0xd4, 0x19, 0x27, + 0xb4, 0x3d, 0xbc, 0x0c, 0x2b, 0x28, 0x7a, 0x33, 0x09, 0x01, 0x58, 0xcc, 0xf7, 0xd1, 0xd2, 0xfb, + 0xc3, 0xb7, 0x48, 0x7e, 0xdc, 0x3f, 0xfb, 0x2e, 0xa0, 0x4e, 0xfc, 0x3e, 0x62, 0x5f, 0xcd, 0x9a, + 0xb1, 0xaf, 0xca, 0x1d, 0x71, 0xaf, 0xfe, 0xcc, 0x82, 0xf1, 0x5a, 0xe0, 0xd2, 0xad, 0xfb, 0x8d, + 0xb4, 0x4f, 0xf5, 0xf8, 0xa7, 0x43, 0x5d, 0xe2, 0x9f, 0x3e, 0x0e, 0x83, 0xb5, 0xc0, 0xad, 0xd6, + 0xba, 0xf9, 0x36, 0xdb, 0x7f, 0xdb, 0x82, 0xe1, 0x5a, 0xe0, 0x9e, 0x80, 0x70, 0xfe, 0x35, 0x53, + 0x38, 0xff, 0x48, 0xce, 0xba, 0xc9, 0x91, 0xc7, 0xff, 0xcd, 0x01, 0x18, 0xa3, 0xfd, 0x0c, 0x36, + 0xe5, 0x54, 0x1a, 0xc3, 0x66, 0xf5, 0x31, 0x6c, 0x94, 0x17, 0x0e, 0x9a, 0xcd, 0x60, 0x2f, 0x3d, + 0xad, 0xcb, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x4a, 0xad, 0x90, 0xec, 0x7a, 0x81, 0x60, 0x32, + 0x35, 0x55, 0x47, 0x4d, 0x94, 0x63, 0x85, 0x41, 0x1f, 0x67, 0x91, 0xe7, 0x37, 0x48, 0x9d, 0x34, + 0x02, 0xdf, 0xe5, 0xf2, 0xeb, 0xa2, 0x48, 0x1b, 0xa0, 0x95, 0x63, 0x03, 0x0b, 0xdd, 0x85, 0x32, + 0xfb, 0xcf, 0x8e, 0x9d, 0xa3, 0x67, 0x93, 0x14, 0xd9, 0xc5, 0x04, 0x01, 0x9c, 0xd0, 0x42, 0xcf, + 0x01, 0xc4, 0x32, 0x42, 0x76, 0x24, 0xe2, 0x1c, 0x29, 0x86, 0x5c, 0xc5, 0xce, 0x8e, 0xb0, 0x86, + 0x85, 0x9e, 0x82, 0x72, 0xec, 0x78, 0xcd, 0x9b, 0x9e, 0x4f, 0x22, 0x26, 0x97, 0x2e, 0xca, 0x24, + 0x5f, 0xa2, 0x10, 0x27, 0x70, 0xca, 0x10, 0xb1, 0x20, 0x00, 0x3c, 0x17, 0x6d, 0x89, 0x61, 0x33, + 0x86, 0xe8, 0xa6, 0x2a, 0xc5, 0x1a, 0x06, 0xda, 0x82, 0xf3, 0x9e, 0xcf, 0x42, 0xec, 0x93, 0xfa, + 0xb6, 0xd7, 0x5a, 0xbb, 0x59, 0xbf, 0x43, 0x42, 0x6f, 0x63, 0x7f, 0xc1, 0x69, 0x6c, 0x13, 0x5f, + 0xe6, 0x09, 0xfc, 0xa8, 0xe8, 0xe2, 0xf9, 0x6a, 0x17, 0x5c, 0xdc, 0x95, 0x92, 0xfd, 0x32, 0x9c, + 0xa9, 0x05, 0x6e, 0x2d, 0x08, 0xe3, 0xe5, 0x20, 0xdc, 0x73, 0x42, 0x57, 0xae, 0x94, 0x59, 0x99, + 0x85, 0x84, 0x1e, 0x85, 0x83, 0xfc, 0xa0, 0x30, 0x72, 0x61, 0x3d, 0xcf, 0x98, 0xaf, 0x23, 0x3a, + 0xa3, 0x34, 0x18, 0x1b, 0xa0, 0xf2, 0x4d, 0x5c, 0x73, 0x62, 0x82, 0x6e, 0xb1, 0xa4, 0xb8, 0xc9, + 0x8d, 0x28, 0xaa, 0x3f, 0xa9, 0x25, 0xc5, 0x4d, 0x80, 0x99, 0x57, 0xa8, 0x59, 0xdf, 0xfe, 0xd9, + 0x01, 0x76, 0x38, 0xa6, 0x72, 0x16, 0xa0, 0xcf, 0xc1, 0x78, 0x44, 0x6e, 0x7a, 0x7e, 0xfb, 0x9e, + 0x94, 0x09, 0x74, 0x71, 0x27, 0xaa, 0x2f, 0xe9, 0x98, 0x5c, 0xb2, 0x68, 0x96, 0xe1, 0x14, 0x35, + 0xb4, 0x03, 0xe3, 0x7b, 0x9e, 0xef, 0x06, 0x7b, 0x91, 0xa4, 0x5f, 0xca, 0x17, 0x30, 0xde, 0xe5, + 0x98, 0xa9, 0x3e, 0x1a, 0xcd, 0xdd, 0x35, 0x88, 0xe1, 0x14, 0x71, 0xba, 0x00, 0xc3, 0xb6, 0x3f, + 0x1f, 0xdd, 0x8e, 0x48, 0x28, 0xd2, 0x1b, 0xb3, 0x05, 0x88, 0x65, 0x21, 0x4e, 0xe0, 0x74, 0x01, + 0xb2, 0x3f, 0xd7, 0xc2, 0xa0, 0xcd, 0xe3, 0xd8, 0x8b, 0x05, 0x88, 0x55, 0x29, 0xd6, 0x30, 0xe8, + 0x06, 0x65, 0xff, 0x56, 0x03, 0x1f, 0x07, 0x41, 0x2c, 0xb7, 0x34, 0x4b, 0xa8, 0xa9, 0x95, 0x63, + 0x03, 0x0b, 0x2d, 0x03, 0x8a, 0xda, 0xad, 0x56, 0x93, 0xd9, 0x29, 0x38, 0x4d, 0x46, 0x8a, 0xeb, + 0x88, 0x8b, 0x3c, 0x4a, 0x67, 0xbd, 0x03, 0x8a, 0x33, 0x6a, 0xd0, 0xb3, 0x7a, 0x43, 0x74, 0x75, + 0x90, 0x75, 0x95, 0x2b, 0x23, 0xea, 0xbc, 0x9f, 0x12, 0x86, 0x96, 0x60, 0x38, 0xda, 0x8f, 0x1a, + 0xb1, 0x08, 0x37, 0x96, 0x93, 0x96, 0xa6, 0xce, 0x50, 0xb4, 0xac, 0x68, 0xbc, 0x0a, 0x96, 0x75, + 0xed, 0x6f, 0x67, 0xac, 0x00, 0x4b, 0x86, 0x1b, 0xb7, 0x43, 0x82, 0x76, 0x60, 0xac, 0xc5, 0x56, + 0x98, 0x08, 0xcc, 0x2e, 0x96, 0xc9, 
0x0b, 0x7d, 0xbe, 0xe9, 0xf7, 0xe8, 0x09, 0xaa, 0x64, 0x6e, + 0xec, 0xb1, 0x54, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0xd5, 0xb3, 0xec, 0x32, 0xa9, 0xf3, 0x87, + 0xfa, 0xb0, 0x30, 0xac, 0x16, 0xaf, 0x92, 0x99, 0x7c, 0x89, 0x51, 0xf2, 0x45, 0xc2, 0x38, 0x1b, + 0xcb, 0xba, 0xe8, 0xb3, 0x30, 0x4e, 0x99, 0x7c, 0x2d, 0x31, 0xc5, 0xe9, 0x7c, 0x07, 0xf8, 0x24, + 0x1f, 0x85, 0x96, 0xb4, 0x41, 0xaf, 0x8c, 0x53, 0xc4, 0xd0, 0x9b, 0xcc, 0x04, 0xc0, 0xcc, 0x79, + 0xd1, 0x83, 0xb4, 0xae, 0xed, 0x97, 0x64, 0x35, 0x22, 0x79, 0xf9, 0x34, 0xec, 0x87, 0x9b, 0x4f, + 0x03, 0xdd, 0x84, 0x31, 0x91, 0x11, 0x56, 0x08, 0x3a, 0x8b, 0x86, 0x20, 0x6b, 0x0c, 0xeb, 0xc0, + 0xc3, 0x74, 0x01, 0x36, 0x2b, 0xa3, 0x4d, 0xb8, 0xa0, 0x25, 0x75, 0xb9, 0x16, 0x3a, 0x4c, 0x1b, + 0xed, 0xb1, 0x93, 0x48, 0xbb, 0xe6, 0x1e, 0xbb, 0x7f, 0x30, 0x7b, 0x61, 0xad, 0x1b, 0x22, 0xee, + 0x4e, 0x07, 0xdd, 0x82, 0x33, 0xdc, 0x7d, 0xb3, 0x42, 0x1c, 0xb7, 0xe9, 0xf9, 0xea, 0x1e, 0xe5, + 0xbb, 0xe5, 0xdc, 0xfd, 0x83, 0xd9, 0x33, 0xf3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x0d, 0xca, + 0xae, 0x1f, 0x89, 0x31, 0x18, 0x32, 0xf2, 0xe6, 0x94, 0x2b, 0xab, 0x75, 0xf5, 0xfd, 0xc9, 0x1f, + 0x9c, 0x54, 0x40, 0x9b, 0x5c, 0xd8, 0xa9, 0x64, 0x0b, 0xc3, 0x1d, 0x81, 0x67, 0xd2, 0x52, 0x2a, + 0xc3, 0x81, 0x8b, 0x4b, 0xf9, 0x95, 0x5d, 0xb3, 0xe1, 0xdb, 0x65, 0x10, 0x46, 0x6f, 0x00, 0xa2, + 0xcc, 0xb7, 0xd7, 0x20, 0xf3, 0x0d, 0x16, 0xf5, 0x9f, 0xc9, 0x86, 0x4b, 0xa6, 0x4b, 0x51, 0xbd, + 0x03, 0x03, 0x67, 0xd4, 0x42, 0xd7, 0xe9, 0x6d, 0xa0, 0x97, 0x0a, 0xfb, 0x6c, 0x95, 0xe5, 0xac, + 0x42, 0x5a, 0x21, 0x69, 0x38, 0x31, 0x71, 0x4d, 0x8a, 0x38, 0x55, 0x0f, 0xb9, 0x70, 0xde, 0x69, + 0xc7, 0x01, 0x93, 0x23, 0x9b, 0xa8, 0x6b, 0xc1, 0x36, 0xf1, 0x99, 0x0a, 0xa7, 0xb4, 0x70, 0x89, + 0x5e, 0xd4, 0xf3, 0x5d, 0xf0, 0x70, 0x57, 0x2a, 0x94, 0xc1, 0x52, 0x39, 0x4a, 0xc1, 0x8c, 0xa7, + 0x93, 0x91, 0xa7, 0xf4, 0x45, 0x18, 0xd9, 0x0a, 0xa2, 0x78, 0x95, 0xc4, 0x7b, 0x41, 0xb8, 0x2d, + 0xa2, 0x22, 0x26, 0x91, 0x74, 0x13, 0x10, 0xd6, 0xf1, 0xe8, 0x0b, 0x8a, 0x19, 0x18, 0x54, 0x2b, + 0x4c, 0xb7, 0x5b, 0x4a, 0xce, 0x98, 0xeb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0xad, 0xd6, 0x16, 0x99, + 0x9e, 0x36, 0x85, 0x5a, 0xad, 0x2d, 0x62, 0x09, 0xa7, 0xcb, 0x35, 0xda, 0x72, 0x42, 0x52, 0x0b, + 0x83, 0x06, 0x89, 0xb4, 0xf8, 0xcd, 0x8f, 0xf2, 0x98, 0x8f, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70, + 0x76, 0x3d, 0x44, 0x3a, 0x13, 0x1a, 0x8d, 0xe7, 0x0b, 0xd8, 0x3b, 0x59, 0x81, 0x3e, 0x73, 0x1a, + 0xf9, 0x30, 0xa9, 0x52, 0x29, 0xf1, 0x28, 0x8f, 0xd1, 0xf4, 0x04, 0x5b, 0xdb, 0xfd, 0x87, 0x88, + 0x54, 0x2a, 0x8b, 0x6a, 0x8a, 0x12, 0xee, 0xa0, 0x6d, 0x84, 0x4c, 0x9a, 0xec, 0x99, 0xb4, 0xf6, + 0x2a, 0x94, 0xa3, 0xf6, 0xba, 0x1b, 0xec, 0x38, 0x9e, 0xcf, 0xf4, 0xb4, 0x1a, 0x2b, 0x5f, 0x97, + 0x00, 0x9c, 0xe0, 0xa0, 0x65, 0x28, 0x39, 0x52, 0x1f, 0x81, 0xf2, 0x23, 0x6d, 0x28, 0x2d, 0x04, + 0x77, 0x3e, 0x97, 0x1a, 0x08, 0x55, 0x17, 0xbd, 0x0a, 0x63, 0xc2, 0xfd, 0x50, 0x64, 0xf1, 0x3b, + 0x65, 0xfa, 0x88, 0xd4, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x6d, 0x18, 0x89, 0x83, 0x26, 0x73, 0x74, + 0xa0, 0x1c, 0xd2, 0xd9, 0xfc, 0x68, 0x5d, 0x6b, 0x0a, 0x4d, 0x17, 0x05, 0xaa, 0xaa, 0x58, 0xa7, + 0x83, 0xd6, 0xf8, 0x7a, 0x67, 0x71, 0x8c, 0x49, 0x34, 0xfd, 0x48, 0xfe, 0x9d, 0xa4, 0xc2, 0x1d, + 0x9b, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x0d, 0xa6, 0x5a, 0xa1, 0x17, 0xb0, 0x35, 0xa1, + 0x54, 0x51, 0xd3, 0x66, 0xf6, 0x95, 0x5a, 0x1a, 0x01, 0x77, 0xd6, 0x61, 0xde, 0xa3, 0xa2, 0x70, + 0xfa, 0x1c, 0xcf, 0xda, 0xcb, 0x5f, 0x46, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x61, 0x27, 0x31, 0x7f, + 0xd4, 0x4f, 0xcf, 0xe4, 0x07, 0xf7, 0xd0, 0x1f, 0xff, 0x9c, 
0xef, 0x53, 0x7f, 0x71, 0x42, 0x01, + 0xb9, 0x5a, 0x46, 0x38, 0xca, 0x6c, 0x47, 0xd3, 0xe7, 0xbb, 0x58, 0x79, 0xa5, 0x38, 0xf3, 0x84, + 0x21, 0x30, 0x8a, 0x23, 0x9c, 0xa2, 0x89, 0xbe, 0x05, 0x26, 0x45, 0x30, 0xb1, 0x64, 0x98, 0x2e, + 0x24, 0xe6, 0xa3, 0x38, 0x05, 0xc3, 0x1d, 0xd8, 0x3c, 0xbe, 0xbb, 0xb3, 0xde, 0x24, 0xe2, 0xe8, + 0xbb, 0xe9, 0xf9, 0xdb, 0xd1, 0xf4, 0x45, 0x76, 0x3e, 0x88, 0xf8, 0xee, 0x69, 0x28, 0xce, 0xa8, + 0x81, 0xd6, 0x60, 0xb2, 0x15, 0x12, 0xb2, 0xc3, 0x78, 0x64, 0x71, 0x9f, 0xcd, 0x72, 0xe7, 0x69, + 0xda, 0x93, 0x5a, 0x0a, 0x76, 0x98, 0x51, 0x86, 0x3b, 0x28, 0xa0, 0x3d, 0x28, 0x05, 0xbb, 0x24, + 0xdc, 0x22, 0x8e, 0x3b, 0x7d, 0xa9, 0x8b, 0x39, 0xb3, 0xb8, 0xdc, 0x6e, 0x09, 0xdc, 0x94, 0xfa, + 0x5a, 0x16, 0xf7, 0x56, 0x5f, 0xcb, 0xc6, 0xd0, 0x0f, 0x5a, 0x70, 0x4e, 0x4a, 0xbc, 0xeb, 0x2d, + 0x3a, 0xea, 0x8b, 0x81, 0x1f, 0xc5, 0x21, 0x77, 0xf7, 0x7d, 0x2c, 0xdf, 0x05, 0x76, 0x2d, 0xa7, + 0x92, 0x92, 0x2b, 0x9e, 0xcb, 0xc3, 0x88, 0x70, 0x7e, 0x8b, 0x33, 0xdf, 0x0c, 0x53, 0x1d, 0x37, + 0xf7, 0x51, 0x52, 0x4e, 0xcc, 0x6c, 0xc3, 0x98, 0x31, 0x3a, 0x0f, 0x55, 0x73, 0xf9, 0x2f, 0x87, + 0xa1, 0xac, 0xb4, 0x5a, 0xe8, 0xaa, 0xa9, 0xac, 0x3c, 0x97, 0x56, 0x56, 0x96, 0xe8, 0x6b, 0x56, + 0xd7, 0x4f, 0xae, 0x65, 0x04, 0x57, 0xca, 0xdb, 0x8b, 0xfd, 0x7b, 0xcd, 0x6a, 0x42, 0xca, 0x62, + 0xdf, 0x5a, 0xcf, 0x81, 0xae, 0x72, 0xcf, 0x6b, 0x30, 0xe5, 0x07, 0x8c, 0x5d, 0x24, 0xae, 0xe4, + 0x05, 0xd8, 0x95, 0x5f, 0xd6, 0xa3, 0x15, 0xa4, 0x10, 0x70, 0x67, 0x1d, 0xda, 0x20, 0xbf, 0xb3, + 0xd3, 0x82, 0x56, 0x7e, 0xa5, 0x63, 0x01, 0x45, 0x8f, 0xc3, 0x60, 0x2b, 0x70, 0xab, 0x35, 0xc1, + 0x2a, 0x6a, 0xe9, 0x47, 0xdd, 0x6a, 0x0d, 0x73, 0x18, 0x9a, 0x87, 0x21, 0xf6, 0x23, 0x9a, 0x1e, + 0xcd, 0x77, 0x4b, 0x67, 0x35, 0xb4, 0x84, 0x1e, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc0, 0x87, 0xf2, + 0xd7, 0x4c, 0xe0, 0x33, 0xfc, 0x80, 0x02, 0x1f, 0x49, 0x00, 0x27, 0xb4, 0xd0, 0x3d, 0x38, 0x63, + 0xbc, 0x69, 0xf8, 0x12, 0x21, 0x91, 0x70, 0x8d, 0x7d, 0xbc, 0xeb, 0x63, 0x46, 0x68, 0x49, 0x2f, + 0x88, 0x4e, 0x9f, 0xa9, 0x66, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0x4c, 0x35, 0x3a, 0x5a, 0x2d, + 0xf5, 0xdf, 0xaa, 0x9a, 0xd0, 0xce, 0x16, 0x3b, 0x09, 0xa3, 0x57, 0xa1, 0xf4, 0x6e, 0x10, 0xb1, + 0x63, 0x56, 0xb0, 0xb7, 0xd2, 0xaf, 0xb2, 0xf4, 0xe6, 0xad, 0x3a, 0x2b, 0x3f, 0x3c, 0x98, 0x1d, + 0xa9, 0x05, 0xae, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0xbd, 0x16, 0xcc, 0x74, 0x3e, 0x9a, 0x54, 0xa7, + 0xc7, 0xfa, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, 0x59, 0xca, 0x25, 0x87, 0xbb, 0x34, 0x65, 0x7f, 0x99, + 0x6b, 0x34, 0x85, 0xde, 0x83, 0x44, 0xed, 0xe6, 0x49, 0x24, 0x40, 0x5c, 0x32, 0x54, 0x32, 0x0f, + 0xac, 0x35, 0xff, 0x35, 0x8b, 0x69, 0xcd, 0xd7, 0xc8, 0x4e, 0xab, 0xe9, 0xc4, 0x27, 0xe1, 0x96, + 0xf7, 0x26, 0x94, 0x62, 0xd1, 0x5a, 0xb7, 0x9c, 0x8d, 0x5a, 0xa7, 0x98, 0xe5, 0x80, 0x62, 0x36, + 0x65, 0x29, 0x56, 0x64, 0xec, 0x7f, 0xc8, 0x67, 0x40, 0x42, 0x4e, 0x40, 0xf2, 0x5d, 0x31, 0x25, + 0xdf, 0xb3, 0x3d, 0xbe, 0x20, 0x47, 0x02, 0xfe, 0x0f, 0xcc, 0x7e, 0x33, 0x21, 0xcb, 0x07, 0xdd, + 0x5c, 0xc3, 0xfe, 0x61, 0x0b, 0x4e, 0x67, 0xd9, 0x37, 0xd2, 0x07, 0x02, 0x17, 0xf1, 0x28, 0xf3, + 0x15, 0x35, 0x82, 0x77, 0x44, 0x39, 0x56, 0x18, 0x7d, 0xa7, 0x43, 0x3a, 0x5a, 0x78, 0xd0, 0x5b, + 0x30, 0x56, 0x0b, 0x89, 0x76, 0xa1, 0xbd, 0xce, 0xfd, 0x6c, 0x79, 0x7f, 0x9e, 0x3e, 0xb2, 0x8f, + 0xad, 0xfd, 0x33, 0x05, 0x38, 0xcd, 0xf5, 0xcf, 0xf3, 0xbb, 0x81, 0xe7, 0xd6, 0x02, 0x57, 0xa4, + 0xb2, 0x7a, 0x0b, 0x46, 0x5b, 0x9a, 0x5c, 0xae, 0x5b, 0xa8, 0x3b, 0x5d, 0x7e, 0x97, 0x48, 0x12, + 0xf4, 0x52, 0x6c, 0xd0, 0x42, 0x2e, 0x8c, 0x92, 0x5d, 0xaf, 0xa1, 0x94, 0x98, 0x85, 
0x23, 0x5f, + 0x2e, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0x3e, 0x84, 0xec, 0xa6, 0xf6, 0x8f, 0x58, 0xf0, + 0x48, 0x4e, 0x60, 0x3c, 0xda, 0xdc, 0x1e, 0xd3, 0xf4, 0x8b, 0x44, 0x89, 0xaa, 0x39, 0xae, 0xff, + 0xc7, 0x02, 0x8a, 0x3e, 0x0d, 0xc0, 0xf5, 0xf7, 0xf4, 0x85, 0xda, 0x2b, 0x82, 0x98, 0x11, 0xfc, + 0x48, 0x8b, 0x63, 0x23, 0xeb, 0x63, 0x8d, 0x96, 0xfd, 0x93, 0x45, 0x18, 0xe4, 0x29, 0x9e, 0x97, + 0x61, 0x78, 0x8b, 0x07, 0xf8, 0xef, 0x27, 0x97, 0x40, 0x22, 0x3b, 0xe0, 0x05, 0x58, 0x56, 0x46, + 0x2b, 0x70, 0x8a, 0x27, 0x48, 0x68, 0x56, 0x48, 0xd3, 0xd9, 0x97, 0x82, 0x2e, 0x9e, 0x5c, 0x50, + 0x09, 0xfc, 0xaa, 0x9d, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x1d, 0xc6, 0xe9, 0xc3, 0x23, 0x68, 0xc7, + 0x92, 0x12, 0x4f, 0x8d, 0xa0, 0x5e, 0x3a, 0x6b, 0x06, 0x14, 0xa7, 0xb0, 0xe9, 0xdb, 0xb7, 0xd5, + 0x21, 0xd2, 0x1b, 0x4c, 0xde, 0xbe, 0xa6, 0x18, 0xcf, 0xc4, 0x65, 0x86, 0x8d, 0x6d, 0x66, 0xc6, + 0xb9, 0xb6, 0x15, 0x92, 0x68, 0x2b, 0x68, 0xba, 0x8c, 0xd1, 0x1a, 0xd4, 0x0c, 0x1b, 0x53, 0x70, + 0xdc, 0x51, 0x83, 0x52, 0xd9, 0x70, 0xbc, 0x66, 0x3b, 0x24, 0x09, 0x95, 0x21, 0x93, 0xca, 0x72, + 0x0a, 0x8e, 0x3b, 0x6a, 0xd0, 0x75, 0x74, 0xa6, 0x16, 0x06, 0xf4, 0xf0, 0x92, 0xd1, 0x3e, 0x94, + 0xb5, 0xea, 0xb0, 0x74, 0x4c, 0xec, 0x12, 0x17, 0x4b, 0xd8, 0xf3, 0x71, 0x0a, 0x86, 0xaa, 0xba, + 0x2e, 0x5c, 0x12, 0x25, 0x15, 0xf4, 0x2c, 0x8c, 0x88, 0xb0, 0xf7, 0xcc, 0xa8, 0x92, 0x4f, 0x1d, + 0x53, 0xad, 0x57, 0x92, 0x62, 0xac, 0xe3, 0xd8, 0xdf, 0x57, 0x80, 0x53, 0x19, 0x56, 0xf1, 0xfc, + 0xa8, 0xda, 0xf4, 0xa2, 0x58, 0x25, 0x50, 0xd3, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, 0x0f, + 0xfc, 0x30, 0x4c, 0x1f, 0x80, 0xc2, 0xea, 0x54, 0x40, 0x8f, 0x98, 0x8a, 0xec, 0x12, 0x0c, 0xb4, + 0x23, 0x22, 0x23, 0xda, 0xa9, 0xf3, 0x9b, 0x69, 0x5c, 0x18, 0x84, 0xb2, 0xc7, 0x9b, 0x4a, 0x79, + 0xa1, 0xb1, 0xc7, 0x5c, 0x7d, 0xc1, 0x61, 0xb4, 0x73, 0x31, 0xf1, 0x1d, 0x3f, 0x16, 0x4c, 0x74, + 0x12, 0x9a, 0x89, 0x95, 0x62, 0x01, 0xb5, 0xbf, 0x54, 0x84, 0x73, 0xb9, 0x7e, 0x32, 0xb4, 0xeb, + 0x3b, 0x81, 0xef, 0xc5, 0x81, 0xb2, 0x59, 0xe0, 0xe1, 0x98, 0x48, 0x6b, 0x6b, 0x45, 0x94, 0x63, + 0x85, 0x81, 0x2e, 0xc3, 0x20, 0x13, 0x3a, 0x75, 0xa4, 0x92, 0x5b, 0xa8, 0xf0, 0xf8, 0x1c, 0x1c, + 0xdc, 0x77, 0x9a, 0xce, 0xc7, 0x61, 0xa0, 0x15, 0x04, 0xcd, 0xf4, 0xa1, 0x45, 0xbb, 0x1b, 0x04, + 0x4d, 0xcc, 0x80, 0xe8, 0x63, 0x62, 0xbc, 0x52, 0x4a, 0x7a, 0xec, 0xb8, 0x41, 0xa4, 0x0d, 0xda, + 0x93, 0x30, 0xbc, 0x4d, 0xf6, 0x43, 0xcf, 0xdf, 0x4c, 0x1b, 0x6f, 0xdc, 0xe0, 0xc5, 0x58, 0xc2, + 0xcd, 0xac, 0x40, 0xc3, 0xc7, 0x9d, 0x5f, 0xb3, 0xd4, 0xf3, 0x0a, 0xfc, 0xfe, 0x22, 0x4c, 0xe0, + 0x85, 0xca, 0x87, 0x13, 0x71, 0xbb, 0x73, 0x22, 0x8e, 0x3b, 0xbf, 0x66, 0xef, 0xd9, 0xf8, 0x45, + 0x0b, 0x26, 0x58, 0xf0, 0x7d, 0x11, 0xc8, 0xc7, 0x0b, 0xfc, 0x13, 0x60, 0xf1, 0x1e, 0x87, 0xc1, + 0x90, 0x36, 0x9a, 0xce, 0x21, 0xc7, 0x7a, 0x82, 0x39, 0x0c, 0x9d, 0x87, 0x01, 0xd6, 0x05, 0x3a, + 0x79, 0xa3, 0x3c, 0xfd, 0x4e, 0xc5, 0x89, 0x1d, 0xcc, 0x4a, 0x59, 0x74, 0x0a, 0x4c, 0x5a, 0x4d, + 0x8f, 0x77, 0x3a, 0x51, 0x09, 0x7e, 0x30, 0xa2, 0x53, 0x64, 0x76, 0xed, 0xfd, 0x45, 0xa7, 0xc8, + 0x26, 0xd9, 0xfd, 0xf9, 0xf4, 0x87, 0x05, 0xb8, 0x98, 0x59, 0xaf, 0xef, 0xe8, 0x14, 0xdd, 0x6b, + 0x3f, 0xcc, 0x20, 0xed, 0xc5, 0x13, 0x34, 0x8d, 0x1b, 0xe8, 0x97, 0xc3, 0x1c, 0xec, 0x23, 0x68, + 0x44, 0xe6, 0x90, 0x7d, 0x40, 0x82, 0x46, 0x64, 0xf6, 0x2d, 0xe7, 0xf9, 0xf7, 0xe7, 0x85, 0x9c, + 0x6f, 0x61, 0x0f, 0xc1, 0x2b, 0xf4, 0x9c, 0x61, 0xc0, 0x48, 0x70, 0xcc, 0xa3, 0xfc, 0x8c, 0xe1, + 0x65, 0x58, 0x41, 0xd1, 0x3c, 0x4c, 0xec, 0x78, 0x3e, 0x3d, 0x7c, 0xf6, 0x4d, 0xc6, 0x4f, 0xc5, + 0xf4, 0x59, 
0x31, 0xc1, 0x38, 0x8d, 0x8f, 0x3c, 0x2d, 0xa0, 0x44, 0x21, 0x3f, 0x2b, 0x73, 0x6e, + 0x6f, 0xe7, 0x4c, 0x75, 0xa9, 0x1a, 0xc5, 0x8c, 0xe0, 0x12, 0x2b, 0xda, 0xfb, 0xbf, 0xd8, 0xff, + 0xfb, 0x7f, 0x34, 0xfb, 0xed, 0x3f, 0xf3, 0x2a, 0x8c, 0x3d, 0xb0, 0xc0, 0xd7, 0xfe, 0x6a, 0x11, + 0x1e, 0xed, 0xb2, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76, 0xd6, 0x77, 0xcc, 0x43, 0x0d, 0x4e, + 0x6f, 0xb4, 0x9b, 0xcd, 0x7d, 0x66, 0x7d, 0x4e, 0x5c, 0x89, 0x21, 0x78, 0xca, 0xf3, 0x32, 0xe1, + 0xd1, 0x72, 0x06, 0x0e, 0xce, 0xac, 0x49, 0x19, 0x7a, 0x7a, 0x93, 0xec, 0x2b, 0x52, 0x29, 0x86, + 0x1e, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x6b, 0x30, 0xe5, 0xec, 0x3a, 0x1e, 0x8f, 0xca, 0x29, 0x09, + 0x70, 0x8e, 0x5e, 0xc9, 0xe9, 0xe6, 0xd3, 0x08, 0xb8, 0xb3, 0x0e, 0x7a, 0x03, 0x50, 0x20, 0xb2, + 0xca, 0x5f, 0x23, 0xbe, 0xd0, 0x6a, 0xb1, 0xb9, 0x2b, 0x26, 0x47, 0xc2, 0xad, 0x0e, 0x0c, 0x9c, + 0x51, 0x2b, 0x15, 0xa0, 0x61, 0x28, 0x3f, 0x40, 0x43, 0xf7, 0x73, 0xb1, 0x67, 0x7e, 0x80, 0xff, + 0x64, 0xd1, 0xeb, 0x8b, 0x33, 0xf9, 0x66, 0x9c, 0xb1, 0x57, 0x99, 0x41, 0x17, 0x97, 0xe1, 0x69, + 0xb1, 0x12, 0xce, 0x68, 0x06, 0x5d, 0x09, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0x8b, 0x9e, + 0xc1, 0xe2, 0x8b, 0x60, 0x28, 0x0a, 0x03, 0x7d, 0x06, 0x86, 0x5d, 0x6f, 0xd7, 0x8b, 0x82, 0x50, + 0xac, 0xf4, 0x23, 0xaa, 0x0b, 0x92, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xfe, 0x02, + 0x8c, 0xc9, 0x16, 0xdf, 0x6c, 0x07, 0xb1, 0x73, 0x02, 0xd7, 0xf2, 0x35, 0xe3, 0x5a, 0xfe, 0x58, + 0xb7, 0x88, 0x30, 0xac, 0x4b, 0xb9, 0xd7, 0xf1, 0xad, 0xd4, 0x75, 0xfc, 0x44, 0x6f, 0x52, 0xdd, + 0xaf, 0xe1, 0x7f, 0x64, 0xc1, 0x94, 0x81, 0x7f, 0x02, 0xb7, 0xc1, 0xb2, 0x79, 0x1b, 0x3c, 0xd6, + 0xf3, 0x1b, 0x72, 0x6e, 0x81, 0xef, 0x2e, 0xa6, 0xfa, 0xce, 0x4e, 0xff, 0x77, 0x61, 0x60, 0xcb, + 0x09, 0xdd, 0x6e, 0x11, 0xb0, 0x3b, 0x2a, 0xcd, 0x5d, 0x77, 0x42, 0xa1, 0xd6, 0x7b, 0x5a, 0x25, + 0x45, 0x76, 0xc2, 0xde, 0x2a, 0x3d, 0xd6, 0x14, 0x7a, 0x19, 0x86, 0xa2, 0x46, 0xd0, 0x52, 0xf6, + 0xe2, 0x97, 0x78, 0xc2, 0x64, 0x5a, 0x72, 0x78, 0x30, 0x8b, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, + 0xe8, 0x2d, 0x18, 0x63, 0xbf, 0x94, 0x8d, 0x4d, 0x31, 0x3f, 0x5b, 0x4e, 0x5d, 0x47, 0xe4, 0x06, + 0x68, 0x46, 0x11, 0x36, 0x49, 0xcd, 0x6c, 0x42, 0x59, 0x7d, 0xd6, 0x43, 0xd5, 0xc7, 0xfd, 0xdb, + 0x22, 0x9c, 0xca, 0x58, 0x73, 0x28, 0x32, 0x66, 0xe2, 0xd9, 0x3e, 0x97, 0xea, 0xfb, 0x9c, 0x8b, + 0x88, 0xbd, 0x86, 0x5c, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x76, 0x44, 0xd2, 0x8d, 0xd2, 0xa2, 0xde, + 0x8d, 0xd2, 0xc6, 0x4e, 0x6c, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x0f, 0x75, 0x4e, 0xff, 0xa4, 0x08, + 0xa7, 0xb3, 0x82, 0x54, 0xa1, 0x6f, 0x4b, 0x65, 0x4e, 0x7b, 0xa1, 0xdf, 0xf0, 0x56, 0x3c, 0x9d, + 0x1a, 0x97, 0x01, 0x2f, 0xcc, 0x99, 0xb9, 0xd4, 0x7a, 0x0e, 0xb3, 0x68, 0x93, 0xb9, 0x9f, 0x87, + 0x3c, 0xe3, 0x9d, 0x3c, 0x3e, 0x3e, 0xd9, 0x77, 0x07, 0x44, 0xaa, 0xbc, 0x28, 0xa5, 0xbf, 0x97, + 0xc5, 0xbd, 0xf5, 0xf7, 0xb2, 0xe5, 0x19, 0x0f, 0x46, 0xb4, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4d, + 0x6f, 0x2b, 0xad, 0xdf, 0x0f, 0x75, 0xd6, 0x7f, 0xc4, 0x82, 0x94, 0x35, 0xb4, 0x12, 0x8b, 0x59, + 0xb9, 0x62, 0xb1, 0x4b, 0x30, 0x10, 0x06, 0x4d, 0x92, 0x4e, 0x54, 0x86, 0x83, 0x26, 0xc1, 0x0c, + 0x42, 0x31, 0xe2, 0x44, 0xd8, 0x31, 0xaa, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x71, 0x18, 0x6c, 0x92, + 0x5d, 0xd2, 0x4c, 0xe7, 0x93, 0xb8, 0x49, 0x0b, 0x31, 0x87, 0xd9, 0xbf, 0x38, 0x00, 0x17, 0xba, + 0x06, 0x70, 0xa0, 0xcf, 0xa1, 0x4d, 0x27, 0x26, 0x7b, 0xce, 0x7e, 0x3a, 0xf0, 0xfb, 0x35, 0x5e, + 0x8c, 0x25, 0x9c, 0xf9, 0xab, 0xf0, 0xf8, 0xad, 0x29, 0x21, 0xa2, 0x08, 0xdb, 0x2a, 0xa0, 0xa6, + 0x50, 0xaa, 0x78, 0x1c, 0x42, 0xa9, 
0xe7, 0x00, 0xa2, 0xa8, 0xc9, 0x0d, 0x5f, 0x5c, 0xe1, 0x08, + 0x93, 0xc4, 0xf9, 0xad, 0xdf, 0x14, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x4c, 0xb6, 0xc2, 0x20, 0xe6, + 0x32, 0xd9, 0x0a, 0xb7, 0x0d, 0x1b, 0x34, 0x7d, 0xe7, 0x6b, 0x29, 0x38, 0xee, 0xa8, 0x81, 0x5e, + 0x84, 0x11, 0xe1, 0x4f, 0x5f, 0x0b, 0x82, 0xa6, 0x10, 0x03, 0x29, 0x73, 0xa9, 0x7a, 0x02, 0xc2, + 0x3a, 0x9e, 0x56, 0x8d, 0x09, 0x7a, 0x87, 0x33, 0xab, 0x71, 0x61, 0xaf, 0x86, 0x97, 0x0a, 0x58, + 0x57, 0xea, 0x2b, 0x60, 0x5d, 0x22, 0x18, 0x2b, 0xf7, 0xad, 0xdb, 0x82, 0x9e, 0xa2, 0xa4, 0x9f, + 0x1b, 0x80, 0x53, 0x62, 0xe1, 0x3c, 0xec, 0xe5, 0x72, 0xbb, 0x73, 0xb9, 0x1c, 0x87, 0xe8, 0xec, + 0xc3, 0x35, 0x73, 0xd2, 0x6b, 0xe6, 0x07, 0x2c, 0x30, 0xd9, 0x2b, 0xf4, 0x7f, 0xe7, 0x66, 0xce, + 0x78, 0x31, 0x97, 0x5d, 0x73, 0xe5, 0x05, 0xf2, 0x3e, 0x73, 0x68, 0xd8, 0xff, 0xc1, 0x82, 0xc7, + 0x7a, 0x52, 0x44, 0x4b, 0x50, 0x66, 0x3c, 0xa0, 0xf6, 0x3a, 0x7b, 0x42, 0xd9, 0x8e, 0x4a, 0x40, + 0x0e, 0x4b, 0x9a, 0xd4, 0x44, 0x4b, 0x1d, 0x29, 0x4a, 0x9e, 0xcc, 0x48, 0x51, 0x72, 0xc6, 0x18, + 0x9e, 0x07, 0xcc, 0x51, 0xf2, 0xe5, 0x22, 0x0c, 0xf1, 0x15, 0x7f, 0x02, 0xcf, 0xb0, 0x65, 0x21, + 0xb7, 0xed, 0x12, 0x11, 0x8f, 0xf7, 0x65, 0xae, 0xe2, 0xc4, 0x0e, 0x67, 0x13, 0xd4, 0x6d, 0x95, + 0x48, 0x78, 0xd1, 0xe7, 0x00, 0xa2, 0x38, 0xf4, 0xfc, 0x4d, 0x5a, 0x26, 0x62, 0x25, 0x7e, 0xbc, + 0x0b, 0xb5, 0xba, 0x42, 0xe6, 0x34, 0x93, 0x9d, 0xab, 0x00, 0x58, 0xa3, 0x88, 0xe6, 0x8c, 0xfb, + 0x72, 0x26, 0x25, 0xf8, 0x04, 0x4e, 0x35, 0xb9, 0x3d, 0x67, 0x5e, 0x82, 0xb2, 0x22, 0xde, 0x4b, + 0x8a, 0x33, 0xaa, 0x33, 0x17, 0x9f, 0x82, 0x89, 0x54, 0xdf, 0x8e, 0x24, 0x04, 0xfa, 0x25, 0x0b, + 0x26, 0x78, 0x67, 0x96, 0xfc, 0x5d, 0x71, 0xa6, 0xbe, 0x07, 0xa7, 0x9b, 0x19, 0x67, 0x9b, 0x98, + 0xd1, 0xfe, 0xcf, 0x42, 0x25, 0xf4, 0xc9, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x15, 0xba, 0x6e, 0xe9, + 0xd9, 0xe5, 0x34, 0x85, 0x5b, 0xe3, 0x28, 0x5f, 0xb3, 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0xdb, 0x16, + 0x4c, 0xf1, 0x9e, 0xdf, 0x20, 0xfb, 0x6a, 0x87, 0x7f, 0x3d, 0xfb, 0x2e, 0xb2, 0x06, 0x15, 0x72, + 0xb2, 0x06, 0xe9, 0x9f, 0x56, 0xec, 0xfa, 0x69, 0x3f, 0x63, 0x81, 0x58, 0x21, 0x27, 0xf0, 0x94, + 0xff, 0x66, 0xf3, 0x29, 0x3f, 0x93, 0xbf, 0x09, 0x72, 0xde, 0xf0, 0x7f, 0x66, 0xc1, 0x24, 0x47, + 0x48, 0x74, 0xce, 0x5f, 0xd7, 0x79, 0xe8, 0x27, 0xb7, 0xe8, 0x0d, 0xb2, 0xbf, 0x16, 0xd4, 0x9c, + 0x78, 0x2b, 0xfb, 0xa3, 0x8c, 0xc9, 0x1a, 0xe8, 0x3a, 0x59, 0xae, 0xdc, 0x40, 0x47, 0x48, 0x58, + 0x7c, 0xe4, 0xa0, 0xfa, 0xf6, 0xd7, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0xfd, 0xa1, 0x4c, 0x05, 0x2b, + 0xd5, 0xae, 0x8b, 0xe4, 0x68, 0x52, 0x10, 0xac, 0x61, 0x1d, 0xcb, 0xf0, 0xa4, 0x0c, 0x07, 0x8a, + 0xbd, 0x0d, 0x07, 0x8e, 0x30, 0xa2, 0x7f, 0x30, 0x08, 0x69, 0x0f, 0x10, 0x74, 0x07, 0x46, 0x1b, + 0x4e, 0xcb, 0x59, 0xf7, 0x9a, 0x5e, 0xec, 0x91, 0xa8, 0x9b, 0xc5, 0xd1, 0xa2, 0x86, 0x27, 0x54, + 0xbd, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x39, 0x80, 0x56, 0xe8, 0xed, 0x7a, 0x4d, 0xb2, 0xc9, 0x24, + 0x0e, 0xcc, 0x91, 0x9a, 0x9b, 0xd1, 0xc8, 0x52, 0xac, 0x61, 0x64, 0x78, 0xaa, 0x16, 0x1f, 0xb2, + 0xa7, 0x2a, 0x9c, 0x98, 0xa7, 0xea, 0xc0, 0x91, 0x3c, 0x55, 0x4b, 0x47, 0xf6, 0x54, 0x1d, 0xec, + 0xcb, 0x53, 0x15, 0xc3, 0x59, 0xc9, 0xc1, 0xd1, 0xff, 0xcb, 0x5e, 0x93, 0x08, 0xb6, 0x9d, 0x7b, + 0x7f, 0xcf, 0xdc, 0x3f, 0x98, 0x3d, 0x8b, 0x33, 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x69, 0x98, 0x76, + 0x9a, 0xcd, 0x60, 0x4f, 0x4d, 0xea, 0x52, 0xd4, 0x70, 0x9a, 0x5c, 0x94, 0x3f, 0xcc, 0xa8, 0x9e, + 0xbf, 0x7f, 0x30, 0x3b, 0x3d, 0x9f, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0xd7, 0xa0, 0xdc, 0x0a, 0x83, + 0xc6, 0x8a, 0xe6, 0xa6, 0x76, 0x91, 0x0e, 0x60, 0x4d, 0x16, 
0x1e, 0x1e, 0xcc, 0x8e, 0xa9, 0x3f, + 0xec, 0xc2, 0x4f, 0x2a, 0xd8, 0xdb, 0x70, 0xaa, 0x4e, 0x42, 0x8f, 0xa5, 0x1f, 0x76, 0x93, 0xf3, + 0x63, 0x0d, 0xca, 0x61, 0xea, 0xc4, 0xec, 0x2b, 0x8a, 0x9c, 0x16, 0x7d, 0x5c, 0x9e, 0x90, 0x09, + 0x21, 0xfb, 0x7f, 0x5a, 0x30, 0x2c, 0x3c, 0x32, 0x4e, 0x80, 0x51, 0x9b, 0x37, 0xe4, 0xe5, 0xb3, + 0xd9, 0xb7, 0x0a, 0xeb, 0x4c, 0xae, 0xa4, 0xbc, 0x9a, 0x92, 0x94, 0x3f, 0xd6, 0x8d, 0x48, 0x77, + 0x19, 0xf9, 0x5f, 0x2b, 0xc2, 0xb8, 0xe9, 0xba, 0x77, 0x02, 0x43, 0xb0, 0x0a, 0xc3, 0x91, 0xf0, + 0x4d, 0x2b, 0xe4, 0x5b, 0x64, 0xa7, 0x27, 0x31, 0xb1, 0xd6, 0x12, 0xde, 0x68, 0x92, 0x48, 0xa6, + 0xd3, 0x5b, 0xf1, 0x21, 0x3a, 0xbd, 0xf5, 0xf2, 0x9e, 0x1c, 0x38, 0x0e, 0xef, 0x49, 0xfb, 0x2b, + 0xec, 0x66, 0xd3, 0xcb, 0x4f, 0x80, 0xe9, 0xb9, 0x66, 0xde, 0x81, 0x76, 0x97, 0x95, 0x25, 0x3a, + 0x95, 0xc3, 0xfc, 0xfc, 0x82, 0x05, 0x17, 0x32, 0xbe, 0x4a, 0xe3, 0x84, 0x9e, 0x86, 0x92, 0xd3, + 0x76, 0x3d, 0xb5, 0x97, 0x35, 0xad, 0xd9, 0xbc, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0xc2, 0x14, 0xb9, + 0xd7, 0xf2, 0xb8, 0xc2, 0x50, 0x37, 0xa9, 0x2c, 0xf2, 0xc8, 0xda, 0x4b, 0x69, 0x20, 0xee, 0xc4, + 0x57, 0xc1, 0x1e, 0x8a, 0xb9, 0xc1, 0x1e, 0xfe, 0xae, 0x05, 0x23, 0xca, 0x3b, 0xeb, 0xa1, 0x8f, + 0xf6, 0xb7, 0x98, 0xa3, 0xfd, 0x68, 0x97, 0xd1, 0xce, 0x19, 0xe6, 0xbf, 0x51, 0x50, 0xfd, 0xad, + 0x05, 0x61, 0xdc, 0x07, 0x87, 0xf5, 0x32, 0x94, 0x5a, 0x61, 0x10, 0x07, 0x8d, 0xa0, 0x29, 0x18, + 0xac, 0xf3, 0x49, 0xd4, 0x13, 0x5e, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x5e, 0x10, 0xc6, + 0x82, 0xa9, 0x49, 0x46, 0x2f, 0x08, 0x63, 0xcc, 0x20, 0xc8, 0x05, 0x88, 0x9d, 0x70, 0x93, 0xc4, + 0xb4, 0x4c, 0x44, 0x59, 0xca, 0x3f, 0x3c, 0xda, 0xb1, 0xd7, 0x9c, 0xf3, 0xfc, 0x38, 0x8a, 0xc3, + 0xb9, 0xaa, 0x1f, 0xdf, 0x0a, 0xf9, 0x7b, 0x4d, 0x0b, 0x63, 0xa2, 0x68, 0x61, 0x8d, 0xae, 0x74, + 0x2b, 0x66, 0x6d, 0x0c, 0x9a, 0xfa, 0xf7, 0x55, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x12, 0xbb, 0x4a, + 0xd8, 0x00, 0x1d, 0x2d, 0xee, 0xc7, 0x97, 0xcb, 0x6a, 0x68, 0x99, 0xf2, 0xad, 0xa2, 0x47, 0x17, + 0xe9, 0x7e, 0x72, 0xd3, 0x86, 0x75, 0x17, 0xa3, 0x24, 0x04, 0x09, 0xfa, 0xd6, 0x0e, 0x9b, 0x8a, + 0x67, 0x7a, 0x5c, 0x01, 0x47, 0xb0, 0xa2, 0x60, 0xd1, 0xfe, 0x59, 0x2c, 0xf4, 0x6a, 0x4d, 0x2c, + 0x72, 0x2d, 0xda, 0xbf, 0x00, 0xe0, 0x04, 0x07, 0x5d, 0x15, 0xaf, 0x71, 0x2e, 0x9a, 0x7e, 0x34, + 0xf5, 0x1a, 0x97, 0x9f, 0xaf, 0x09, 0xb3, 0x9f, 0x85, 0x11, 0x95, 0xeb, 0xb2, 0xc6, 0x53, 0x28, + 0x8a, 0x98, 0x53, 0x4b, 0x49, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x4c, 0x44, 0x5c, 0xd4, 0xa3, 0x42, + 0x8b, 0x72, 0x91, 0xd9, 0xc7, 0xa5, 0x21, 0x4a, 0xdd, 0x04, 0x1f, 0xb2, 0x22, 0x7e, 0x74, 0x48, + 0x57, 0xde, 0x34, 0x09, 0xf4, 0x3a, 0x8c, 0x37, 0x03, 0xc7, 0x5d, 0x70, 0x9a, 0x8e, 0xdf, 0x60, + 0xdf, 0x5b, 0x32, 0x53, 0xa6, 0xdd, 0x34, 0xa0, 0x38, 0x85, 0x4d, 0x39, 0x1f, 0xbd, 0x44, 0x84, + 0xc3, 0x75, 0xfc, 0x4d, 0x12, 0x89, 0xcc, 0x85, 0x8c, 0xf3, 0xb9, 0x99, 0x83, 0x83, 0x73, 0x6b, + 0xa3, 0x97, 0x61, 0x54, 0x7e, 0xbe, 0xe6, 0xf9, 0x9e, 0xd8, 0xde, 0x6b, 0x30, 0x6c, 0x60, 0xa2, + 0x3d, 0x38, 0x23, 0xff, 0xaf, 0x85, 0xce, 0xc6, 0x86, 0xd7, 0x10, 0xee, 0xa0, 0xdc, 0x31, 0x6e, + 0x5e, 0x7a, 0x6f, 0x2d, 0x65, 0x21, 0x1d, 0x1e, 0xcc, 0x5e, 0x12, 0xa3, 0x96, 0x09, 0x67, 0x93, + 0x98, 0x4d, 0x1f, 0xad, 0xc0, 0xa9, 0x2d, 0xe2, 0x34, 0xe3, 0xad, 0xc5, 0x2d, 0xd2, 0xd8, 0x96, + 0x9b, 0x88, 0xf9, 0xd3, 0x6b, 0x16, 0xeb, 0xd7, 0x3b, 0x51, 0x70, 0x56, 0x3d, 0xf4, 0x36, 0x4c, + 0xb7, 0xda, 0xeb, 0x4d, 0x2f, 0xda, 0x5a, 0x0d, 0x62, 0x66, 0x8d, 0xa2, 0x52, 0x67, 0x0a, 0xc7, + 0x7b, 0x15, 0xb1, 0xa0, 0x96, 0x83, 0x87, 0x73, 0x29, 0xa0, 0xf7, 0xe0, 0x4c, 0x6a, 
0x31, 0x08, + 0xd7, 0xe3, 0xf1, 0xfc, 0xe0, 0xe2, 0xf5, 0xac, 0x0a, 0xc2, 0x8b, 0x3f, 0x0b, 0x84, 0xb3, 0x9b, + 0x40, 0x2f, 0x40, 0xc9, 0x6b, 0x2d, 0x3b, 0x3b, 0x5e, 0x73, 0x9f, 0x45, 0x47, 0x2f, 0xb3, 0x88, + 0xe1, 0xa5, 0x6a, 0x8d, 0x97, 0x1d, 0x6a, 0xbf, 0xb1, 0xc2, 0xa4, 0xfc, 0xbe, 0x16, 0x03, 0x32, + 0x9a, 0x9e, 0x4c, 0x8c, 0x6d, 0xb5, 0x40, 0x91, 0x11, 0x36, 0xb0, 0xde, 0x9f, 0x0d, 0xd3, 0xbb, + 0xb4, 0xb2, 0xc6, 0x00, 0xa2, 0xcf, 0xc3, 0xa8, 0xbe, 0x62, 0xc5, 0x65, 0x76, 0x39, 0x9b, 0x3f, + 0xd2, 0x56, 0x36, 0x67, 0x1f, 0xd5, 0xea, 0xd5, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0x1e, 0x4b, + 0x74, 0x13, 0x4a, 0x8d, 0xa6, 0x47, 0xfc, 0xb8, 0x5a, 0xeb, 0x16, 0xbe, 0x68, 0x51, 0xe0, 0x88, + 0xc9, 0x11, 0x91, 0x9f, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0x2d, 0xc0, 0x6c, 0x8f, 0x30, 0xe2, + 0x29, 0x51, 0xbb, 0xd5, 0x97, 0xa8, 0x7d, 0x5e, 0x26, 0x1d, 0x5d, 0x4d, 0xc9, 0x1f, 0x52, 0x09, + 0x45, 0x13, 0x29, 0x44, 0x1a, 0xbf, 0x6f, 0xd3, 0x67, 0x5d, 0x5a, 0x3f, 0xd0, 0xd3, 0x78, 0xdf, + 0xd0, 0xd2, 0x0d, 0xf6, 0xff, 0xe8, 0xc9, 0xd5, 0xb8, 0xd8, 0x5f, 0x29, 0xc0, 0x19, 0x35, 0x84, + 0xdf, 0xb8, 0x03, 0x77, 0xbb, 0x73, 0xe0, 0x8e, 0x41, 0x5f, 0x65, 0xdf, 0x82, 0x21, 0x1e, 0x8f, + 0xa9, 0x0f, 0x66, 0xeb, 0x71, 0x33, 0x74, 0xa1, 0x62, 0x09, 0x8c, 0xf0, 0x85, 0xdf, 0x6b, 0xc1, + 0xc4, 0xda, 0x62, 0xad, 0x1e, 0x34, 0xb6, 0x49, 0x3c, 0xcf, 0x99, 0x63, 0x2c, 0x78, 0x2d, 0xeb, + 0x01, 0x79, 0xa8, 0x2c, 0xee, 0xec, 0x12, 0x0c, 0x6c, 0x05, 0x51, 0x9c, 0x56, 0x66, 0x5f, 0x0f, + 0xa2, 0x18, 0x33, 0x88, 0xfd, 0x3b, 0x16, 0x0c, 0xb2, 0x34, 0xdb, 0xbd, 0x12, 0xbd, 0xf7, 0xf3, + 0x5d, 0xe8, 0x45, 0x18, 0x22, 0x1b, 0x1b, 0xa4, 0x11, 0x8b, 0x59, 0x95, 0xde, 0xc7, 0x43, 0x4b, + 0xac, 0x94, 0x32, 0x18, 0xac, 0x31, 0xfe, 0x17, 0x0b, 0x64, 0x74, 0x17, 0xca, 0xb1, 0xb7, 0x43, + 0xe6, 0x5d, 0x57, 0xa8, 0x03, 0x1f, 0xc0, 0x83, 0x7a, 0x4d, 0x12, 0xc0, 0x09, 0x2d, 0xfb, 0x4b, + 0x05, 0x80, 0x24, 0x1a, 0x47, 0xaf, 0x4f, 0x5c, 0xe8, 0x50, 0x14, 0x5d, 0xce, 0x50, 0x14, 0xa1, + 0x84, 0x60, 0x86, 0x96, 0x48, 0x0d, 0x53, 0xb1, 0xaf, 0x61, 0x1a, 0x38, 0xca, 0x30, 0x2d, 0xc2, + 0x54, 0x12, 0x4d, 0xc4, 0x0c, 0xa6, 0xc4, 0x1e, 0x44, 0x6b, 0x69, 0x20, 0xee, 0xc4, 0xb7, 0x09, + 0x5c, 0x52, 0x41, 0x15, 0xc4, 0x5d, 0xc3, 0xac, 0x4d, 0x8f, 0x90, 0xf3, 0x3f, 0xd1, 0x84, 0x15, + 0x72, 0x35, 0x61, 0x3f, 0x6e, 0xc1, 0xe9, 0x74, 0x3b, 0xcc, 0xfd, 0xef, 0x8b, 0x16, 0x9c, 0x61, + 0xfa, 0x40, 0xd6, 0x6a, 0xa7, 0xf6, 0xf1, 0x85, 0xae, 0x81, 0x22, 0x72, 0x7a, 0x9c, 0xb8, 0xb9, + 0xaf, 0x64, 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xbe, 0x00, 0xd3, 0x79, 0x11, 0x26, 0x98, 0x31, + 0xba, 0x73, 0xaf, 0xbe, 0x4d, 0xf6, 0x84, 0xc9, 0x6f, 0x62, 0x8c, 0xce, 0x8b, 0xb1, 0x84, 0xa7, + 0x23, 0x43, 0x17, 0xfa, 0x8b, 0x0c, 0x8d, 0xb6, 0x60, 0x6a, 0x6f, 0x8b, 0xf8, 0xb7, 0xfd, 0xc8, + 0x89, 0xbd, 0x68, 0xc3, 0x63, 0x19, 0xdb, 0xf9, 0xba, 0x79, 0x45, 0x1a, 0xe6, 0xde, 0x4d, 0x23, + 0x1c, 0x1e, 0xcc, 0x5e, 0x30, 0x0a, 0x92, 0x2e, 0xf3, 0x83, 0x04, 0x77, 0x12, 0xed, 0x0c, 0xac, + 0x3d, 0xf0, 0x10, 0x03, 0x6b, 0xdb, 0x5f, 0xb4, 0xe0, 0x5c, 0x6e, 0xe2, 0x3b, 0x74, 0x05, 0x4a, + 0x4e, 0xcb, 0xe3, 0x82, 0x53, 0x71, 0x8c, 0x32, 0x01, 0x40, 0xad, 0xca, 0xc5, 0xa6, 0x0a, 0xaa, + 0x12, 0xf2, 0x16, 0x72, 0x13, 0xf2, 0xf6, 0xcc, 0xaf, 0x6b, 0x7f, 0x8f, 0x05, 0xc2, 0x91, 0xae, + 0x8f, 0xb3, 0xfb, 0x2d, 0x99, 0xcf, 0xdc, 0x48, 0xbe, 0x71, 0x29, 0xdf, 0xb3, 0x50, 0xa4, 0xdc, + 0x50, 0xbc, 0x92, 0x91, 0x68, 0xc3, 0xa0, 0x65, 0xbb, 0x20, 0xa0, 0x15, 0xc2, 0xc4, 0x8e, 0xbd, + 0x7b, 0xf3, 0x1c, 0x80, 0xcb, 0x70, 0xb5, 0xac, 0xc6, 0xea, 0x66, 0xae, 0x28, 0x08, 0xd6, 0xb0, + 0xec, 0x7f, 
0x5d, 0x80, 0x11, 0x99, 0xec, 0xa1, 0xed, 0xf7, 0x23, 0x1c, 0x38, 0x52, 0xf6, 0x37, + 0x96, 0x06, 0x9c, 0x12, 0xae, 0x25, 0x32, 0x95, 0x24, 0x0d, 0xb8, 0x04, 0xe0, 0x04, 0x87, 0xee, + 0xa2, 0xa8, 0xbd, 0xce, 0xd0, 0x53, 0x6e, 0x5f, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0x7d, 0x1a, 0x26, + 0x79, 0xbd, 0x30, 0x68, 0x39, 0x9b, 0x5c, 0x22, 0x3d, 0xa8, 0xfc, 0xb5, 0x27, 0x57, 0x52, 0xb0, + 0xc3, 0x83, 0xd9, 0xd3, 0xe9, 0x32, 0xa6, 0x6a, 0xe9, 0xa0, 0xc2, 0xcc, 0x37, 0x78, 0x23, 0x74, + 0xf7, 0x77, 0x58, 0x7d, 0x24, 0x20, 0xac, 0xe3, 0xd9, 0x9f, 0x07, 0xd4, 0x99, 0xf6, 0x02, 0xbd, + 0xc1, 0x6d, 0xf6, 0xbc, 0x90, 0xb8, 0xdd, 0x54, 0x2f, 0xba, 0x57, 0xb2, 0xf4, 0xd8, 0xe0, 0xb5, + 0xb0, 0xaa, 0x6f, 0xff, 0xff, 0x45, 0x98, 0x4c, 0xfb, 0xa8, 0xa2, 0xeb, 0x30, 0xc4, 0x59, 0x0f, + 0x41, 0xbe, 0x8b, 0x66, 0x5f, 0xf3, 0x6c, 0x65, 0x87, 0xb0, 0xe0, 0x5e, 0x44, 0x7d, 0xf4, 0x36, + 0x8c, 0xb8, 0xc1, 0x9e, 0xbf, 0xe7, 0x84, 0xee, 0x7c, 0xad, 0x2a, 0x96, 0x73, 0xe6, 0x6b, 0xa9, + 0x92, 0xa0, 0xe9, 0xde, 0xb2, 0x4c, 0x8b, 0x95, 0x80, 0xb0, 0x4e, 0x0e, 0xad, 0xb1, 0x28, 0xbd, + 0x1b, 0xde, 0xe6, 0x8a, 0xd3, 0xea, 0x66, 0xc0, 0xbd, 0x28, 0x91, 0x34, 0xca, 0x63, 0x22, 0x94, + 0x2f, 0x07, 0xe0, 0x84, 0x10, 0xfa, 0x36, 0x38, 0x15, 0xe5, 0x08, 0x58, 0xf3, 0xb2, 0x20, 0x75, + 0x93, 0x39, 0x2e, 0x3c, 0x42, 0xdf, 0xb1, 0x59, 0xa2, 0xd8, 0xac, 0x66, 0xec, 0x5f, 0x3b, 0x05, + 0xc6, 0x26, 0x36, 0x92, 0xe2, 0x59, 0xc7, 0x94, 0x14, 0x0f, 0x43, 0x89, 0xec, 0xb4, 0xe2, 0xfd, + 0x8a, 0x17, 0x76, 0xcb, 0xaa, 0xba, 0x24, 0x70, 0x3a, 0x69, 0x4a, 0x08, 0x56, 0x74, 0xb2, 0x33, + 0x17, 0x16, 0xbf, 0x8e, 0x99, 0x0b, 0x07, 0x4e, 0x30, 0x73, 0xe1, 0x2a, 0x0c, 0x6f, 0x7a, 0x31, + 0x26, 0xad, 0x40, 0x30, 0xfd, 0x99, 0xeb, 0xf0, 0x1a, 0x47, 0xe9, 0xcc, 0x91, 0x25, 0x00, 0x58, + 0x12, 0x41, 0x6f, 0xa8, 0x1d, 0x38, 0x94, 0xff, 0x66, 0xee, 0x54, 0x41, 0x67, 0xee, 0x41, 0x91, + 0x9f, 0x70, 0xf8, 0x41, 0xf3, 0x13, 0x2e, 0xcb, 0xac, 0x82, 0xa5, 0x7c, 0x6f, 0x0b, 0x96, 0x34, + 0xb0, 0x47, 0x2e, 0xc1, 0x3b, 0x7a, 0x26, 0xc6, 0x72, 0xfe, 0x49, 0xa0, 0x92, 0x2c, 0xf6, 0x99, + 0x7f, 0xf1, 0x7b, 0x2c, 0x38, 0xd3, 0xca, 0x4a, 0x4a, 0x2a, 0xb4, 0xb5, 0x2f, 0xf6, 0x9d, 0x75, + 0xd5, 0x68, 0x90, 0x09, 0x6a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74, 0xa0, 0xc3, 0x75, 0x57, 0x24, + 0x10, 0x7c, 0x3c, 0x27, 0x91, 0x63, 0x97, 0xf4, 0x8d, 0x6b, 0x19, 0x49, 0x03, 0x3f, 0x9a, 0x97, + 0x34, 0xb0, 0xef, 0x54, 0x81, 0x6f, 0xa8, 0x14, 0x8e, 0x63, 0xf9, 0x4b, 0x89, 0x27, 0x68, 0xec, + 0x99, 0xb8, 0xf1, 0x0d, 0x95, 0xb8, 0xb1, 0x4b, 0x1c, 0x49, 0x9e, 0x96, 0xb1, 0x67, 0xba, 0x46, + 0x2d, 0xe5, 0xe2, 0xc4, 0xf1, 0xa4, 0x5c, 0x34, 0xae, 0x1a, 0x9e, 0xf5, 0xef, 0xa9, 0x1e, 0x57, + 0x8d, 0x41, 0xb7, 0xfb, 0x65, 0xc3, 0xd3, 0x4b, 0x4e, 0x3d, 0x50, 0x7a, 0xc9, 0x3b, 0x7a, 0xba, + 0x46, 0xd4, 0x23, 0x1f, 0x21, 0x45, 0xea, 0x33, 0x49, 0xe3, 0x1d, 0xfd, 0x02, 0x3c, 0x95, 0x4f, + 0x57, 0xdd, 0x73, 0x9d, 0x74, 0x33, 0xaf, 0xc0, 0x8e, 0xe4, 0x8f, 0xa7, 0x4f, 0x26, 0xf9, 0xe3, + 0x99, 0x63, 0x4f, 0xfe, 0x78, 0xf6, 0x04, 0x92, 0x3f, 0x3e, 0x72, 0x82, 0xc9, 0x1f, 0xef, 0x30, + 0x13, 0x07, 0x1e, 0x8e, 0x44, 0xc4, 0xbd, 0xcc, 0x8e, 0xb1, 0x98, 0x15, 0xb3, 0x84, 0x7f, 0x9c, + 0x02, 0xe1, 0x84, 0x54, 0x46, 0x52, 0xc9, 0xe9, 0x87, 0x90, 0x54, 0x72, 0x35, 0x49, 0x2a, 0x79, + 0x2e, 0x7f, 0xaa, 0x33, 0x4c, 0xcb, 0x73, 0x52, 0x49, 0xde, 0xd1, 0x53, 0x40, 0x3e, 0xda, 0x45, + 0x14, 0x9f, 0x25, 0x78, 0xec, 0x92, 0xf8, 0xf1, 0x75, 0x9e, 0xf8, 0xf1, 0x7c, 0xfe, 0x49, 0x9e, + 0xbe, 0xee, 0xcc, 0x74, 0x8f, 0xdf, 0x57, 0x80, 0x8b, 0xdd, 0xf7, 0x45, 0x22, 0xf5, 0xac, 0x25, + 0x1a, 0xc1, 0x94, 0xd4, 0x93, 0xbf, 
0xad, 0x12, 0xac, 0xbe, 0x23, 0x55, 0x5d, 0x83, 0x29, 0x65, + 0x3b, 0xde, 0xf4, 0x1a, 0xfb, 0x5a, 0x86, 0x7b, 0xe5, 0x6f, 0x5b, 0x4f, 0x23, 0xe0, 0xce, 0x3a, + 0x68, 0x1e, 0x26, 0x8c, 0xc2, 0x6a, 0x45, 0xbc, 0xa1, 0x94, 0x98, 0xb5, 0x6e, 0x82, 0x71, 0x1a, + 0xdf, 0xfe, 0x69, 0x0b, 0x1e, 0xc9, 0xc9, 0xab, 0xd4, 0x77, 0x20, 0xa6, 0x0d, 0x98, 0x68, 0x99, + 0x55, 0x7b, 0xc4, 0x6b, 0x33, 0xb2, 0x37, 0xa9, 0xbe, 0xa6, 0x00, 0x38, 0x4d, 0xd4, 0xfe, 0x53, + 0x0b, 0x2e, 0x74, 0x35, 0xe3, 0x42, 0x18, 0xce, 0x6e, 0xee, 0x44, 0xce, 0x62, 0x48, 0x5c, 0xe2, + 0xc7, 0x9e, 0xd3, 0xac, 0xb7, 0x48, 0x43, 0x93, 0x5b, 0x33, 0x7b, 0xa8, 0x6b, 0x2b, 0xf5, 0xf9, + 0x4e, 0x0c, 0x9c, 0x53, 0x13, 0x2d, 0x03, 0xea, 0x84, 0x88, 0x19, 0x66, 0x31, 0x5d, 0x3b, 0xe9, + 0xe1, 0x8c, 0x1a, 0xe8, 0x25, 0x18, 0x53, 0xe6, 0x61, 0xda, 0x8c, 0xb3, 0x03, 0x18, 0xeb, 0x00, + 0x6c, 0xe2, 0x2d, 0x5c, 0xf9, 0x8d, 0xdf, 0xbb, 0xf8, 0x91, 0xdf, 0xfa, 0xbd, 0x8b, 0x1f, 0xf9, + 0xed, 0xdf, 0xbb, 0xf8, 0x91, 0xef, 0xb8, 0x7f, 0xd1, 0xfa, 0x8d, 0xfb, 0x17, 0xad, 0xdf, 0xba, + 0x7f, 0xd1, 0xfa, 0xed, 0xfb, 0x17, 0xad, 0xdf, 0xbd, 0x7f, 0xd1, 0xfa, 0xd2, 0xef, 0x5f, 0xfc, + 0xc8, 0x5b, 0x85, 0xdd, 0x67, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x40, 0x10, 0x5c, + 0xb3, 0xfc, 0x00, 0x00, +} + +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { +func (m *Affinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n59, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PodAntiAffinity != nil { + { + size, 
err := m.PodAntiAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n59 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) - i += copy(dAtA[i:], m.Scheme) - if len(m.HTTPHeaders) > 0 { - for _, msg := range m.HTTPHeaders { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.PodAffinity != nil { + { + size, err := m.PodAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { +func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttachedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Handler) Marshal() (dAtA []byte, err error) { +func (m *AvoidPods) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Handler) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AvoidPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Exec != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n60, err := m.Exec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n60 - } - if m.HTTPGet != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n61, err := m.HTTPGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n61 - } - if m.TCPSocket != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n62, err := m.TCPSocket.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.PreferAvoidPods) > 0 { + for iNdEx := len(m.PreferAvoidPods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.PreferAvoidPods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n62 } - return i, nil + return len(dAtA) - i, nil } -func (m *HostAlias) Marshal() (dAtA []byte, err error) { +func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AzureDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if m.Kind != nil { + i -= len(*m.Kind) + copy(dAtA[i:], *m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i-- + dAtA[i] = 0x32 + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x22 } - return i, nil + if m.CachingMode != nil { + i -= len(*m.CachingMode) + copy(dAtA[i:], *m.CachingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.DataDiskURI) + copy(dAtA[i:], m.DataDiskURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) + i-- + dAtA[i] = 0x12 + i -= len(m.DiskName) + copy(dAtA[i:], m.DiskName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Type != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - return i, nil -} - -func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureFilePersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - 
dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n63, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 + if m.SecretNamespace != nil { + i -= len(*m.SecretNamespace) + copy(dAtA[i:], *m.SecretNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) + i-- + dAtA[i] = 0x22 } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { + i-- + if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AzureFileVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 
- i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n64, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *KeyToPath) Marshal() (dAtA []byte, err error) { +func (m *Binding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Mode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - } - return i, nil -} - -func (m *Lifecycle) Marshal() (dAtA []byte, err error) { +func (m *Binding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Binding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PostStart != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n65, err := m.PostStart.MarshalTo(dAtA[i:]) + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n65 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.PreStop != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n66, err := m.PreStop.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n66 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRange) Marshal() (dAtA []byte, err error) { +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n67, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n67 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n68, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n68 - return i, nil -} - -func (m *LimitRangeItem) 
Marshal() (dAtA []byte, err error) { +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.Max) > 0 { - keysForMax := make([]string, 0, len(m.Max)) - for k := range m.Max { - keysForMax = append(keysForMax, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - for _, k := range keysForMax { - dAtA[i] = 0x12 - i++ - v := m.Max[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n69, err := (&v).MarshalTo(dAtA[i:]) + if m.ControllerExpandSecretRef != nil { + { + size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n69 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x4a } - if len(m.Min) > 0 { - keysForMin := make([]string, 0, len(m.Min)) - for k := range m.Min { - keysForMin = append(keysForMin, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - for _, k := range keysForMin { - dAtA[i] = 0x1a - i++ - v := m.Min[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n70, err := (&v).MarshalTo(dAtA[i:]) + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n70 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if len(m.Default) > 0 { - keysForDefault := make([]string, 0, len(m.Default)) - for k := range m.Default { - keysForDefault = append(keysForDefault, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - for _, k := range keysForDefault { - dAtA[i] = 0x22 - i++ - v := m.Default[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(dAtA[i:]) + if m.NodeStageSecretRef != nil { + { + size, err := m.NodeStageSecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n71 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + 
i-- + dAtA[i] = 0x3a } - if len(m.DefaultRequest) > 0 { - keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) - for k := range m.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - for _, k := range keysForDefaultRequest { - dAtA[i] = 0x2a - i++ - v := m.DefaultRequest[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(dAtA[i:]) + if m.ControllerPublishSecretRef != nil { + { + size, err := m.ControllerPublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n72 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } - if len(m.MaxLimitRequestRatio) > 0 { - keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) - for k := range m.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - for _, k := range keysForMaxLimitRequestRatio { - dAtA[i] = 0x32 - i++ - v := m.MaxLimitRequestRatio[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n73, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n73 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } } - return i, nil + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.VolumeHandle) + copy(dAtA[i:], m.VolumeHandle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { +func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, 
size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n74, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n74 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x1a + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { +func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Capabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Limits) > 0 { - for _, msg := range m.Limits { + if len(m.Drop) > 0 { + for iNdEx := len(m.Drop) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Drop[iNdEx]) + copy(dAtA[i:], m.Drop[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Drop[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Add) > 0 { + for iNdEx := len(m.Add) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Add[iNdEx]) + copy(dAtA[i:], m.Add[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Add[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n } } - return i, nil + return len(dAtA) - i, nil } -func (m *List) Marshal() (dAtA []byte, err error) { +func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n75, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n75 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { +func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- 
dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - return i, nil + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { +func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { +func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + return len(dAtA) - i, nil } -func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], 
nil } -func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FSType != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } -func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) - i += copy(dAtA[i:], m.Server) + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Namespace) Marshal() (dAtA []byte, err error) { +func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n76, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n76 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n77, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n77 - dAtA[i] = 0x1a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n78, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n78 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceList) Marshal() (dAtA []byte, err error) { +func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatusList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n79, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n79 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { +func (m *ConfigMap) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.BinaryData) > 0 { + keysForBinaryData := make([]string, 0, len(m.BinaryData)) + for k := range m.BinaryData { + keysForBinaryData = append(keysForBinaryData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + for iNdEx := len(keysForBinaryData) - 1; iNdEx >= 0; iNdEx-- { + v := m.BinaryData[string(keysForBinaryData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= len(keysForBinaryData[iNdEx]) + copy(dAtA[i:], keysForBinaryData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBinaryData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Data) > 0 { + keysForData := 
make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - return i, nil + return len(dAtA) - i, nil } -func (m *Node) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Node) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n80, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - i += n80 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n81, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n81 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n82, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n82 - return i, nil + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil } -func (m *NodeAddress) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) - i += copy(dAtA[i:], m.Address) - return i, nil -} - -func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n83, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n83 } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeCondition) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapNodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n84, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n84 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n85, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - 
return 0, err - } - i += n85 + i -= len(m.KubeletConfigKey) + copy(dAtA[i:], m.KubeletConfigKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) + i-- dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ConfigMap != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n86, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n86 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Assigned != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size())) - n87, err := m.Assigned.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n87 + i-- + dAtA[i] = 0x20 } - if m.Active != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) - n88, err := 
m.Active.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n88 } - if m.LastKnownGood != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) - n89, err := m.LastKnownGood.MarshalTo(dAtA[i:]) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n89 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { +func (m *Container) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Container) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n90, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n90 - return i, nil + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeList) Marshal() (dAtA []byte, err error) { +func (m *ContainerImage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64(m.ListMeta.Size())) - n91, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n91 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) + i-- + dAtA[i] = 0x10 + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *ContainerPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil + return len(dAtA) - i, nil } -func (m *NodeResources) Marshal() (dAtA []byte, err error) { +func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if m.Terminated != nil { + { + size, err := m.Terminated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64((&v).Size())) - n92, err := (&v).MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + } + if m.Running != nil { + { + size, err := m.Running.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Waiting != nil { + { + size, err := m.Waiting.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n92 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *NodeSelector) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, msg := range m.NodeSelectorTerms { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateTerminated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x3a + { + size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateWaiting) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.MatchFields) > 0 { - for _, msg := range m.MatchFields { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSpec) Marshal() (dAtA []byte, err error) { +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) - i += copy(dAtA[i:], m.PodCIDR) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) - i += copy(dAtA[i:], m.DoNotUse_ExternalID) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) - i += copy(dAtA[i:], m.ProviderID) - dAtA[i] = 0x20 - i++ - if m.Unschedulable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Started != nil { + i-- + if *m.Started { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 } - i++ - if len(m.Taints) > 0 { - for _, msg := range m.Taints { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x42 + i -= len(m.ImageID) + copy(dAtA[i:], m.ImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i-- + dAtA[i] = 0x3a + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x32 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) + i-- + dAtA[i] = 0x28 + i-- + if m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + { + size, err := m.LastTerminationState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ConfigSource != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n93, err := m.ConfigSource.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n93 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeStatus) Marshal() (dAtA []byte, err error) { +func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonEndpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n94, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n94 - } + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Allocatable) > 0 { - keysForAllocatable := make([]string, 0, len(m.Allocatable)) - for k := range m.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - for _, k := range keysForAllocatable { - dAtA[i] = 0x12 - i++ - v := m.Allocatable[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n95, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *DownwardAPIProjection) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n95 + i-- + dAtA[i] = 0xa } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x20 + } + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n96, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n96 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n97, err := m.NodeInfo.MarshalTo(dAtA[i:]) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err - } - i += n97 - if len(m.Images) > 0 { - for _, msg := range m.Images { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - dAtA[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + return dAtA[:n], nil +} + +func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ 
= l + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 } - if len(m.VolumesAttached) > 0 { - for _, msg := range m.VolumesAttached { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n - } - } - if m.Config != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size())) - n98, err := m.Config.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0xa } - i += n98 } - return i, nil + return len(dAtA) - i, nil } -func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { +func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmptyDirVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.SizeLimit != nil { + { + size, err := m.SizeLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Medium) + copy(dAtA[i:], m.Medium) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) - i += copy(dAtA[i:], m.MachineID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) - i += copy(dAtA[i:], m.SystemUUID) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) - i += copy(dAtA[i:], m.BootID) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) - i += copy(dAtA[i:], m.KernelVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) - i += copy(dAtA[i:], m.OSImage) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) - i += copy(dAtA[i:], m.ContainerRuntimeVersion) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) - i += copy(dAtA[i:], m.KubeletVersion) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) - i += copy(dAtA[i:], m.KubeProxyVersion) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) - i += copy(dAtA[i:], m.OperatingSystem) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) - i += copy(dAtA[i:], m.Architecture) - return i, nil + return len(dAtA) - i, nil } -func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) 
{ - var i int +func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x22 + } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i += copy(dAtA[i:], m.FieldPath) - return i, nil + return len(dAtA) - i, nil } -func (m *ObjectReference) Marshal() (dAtA []byte, err error) { +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- dAtA[i] = 0x1a - i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i += copy(dAtA[i:], m.FieldPath) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { +func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSubset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l 
int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n99, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - i += n99 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n100, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.NotReadyAddresses) > 0 { + for iNdEx := len(m.NotReadyAddresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NotReadyAddresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n100 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n101, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i += n101 - return i, nil + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { +func (m *Endpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n102, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n102 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n103, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Subsets) > 0 { + for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n103 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n104, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n104 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { +func (m *EndpointsList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { - var i int 
+func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n105, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n105 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n106, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n106 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { +func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvFromSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n107, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n107 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ConfigMapRef != nil { + { + size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { +func (m *EnvVar) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m 
*PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvVar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ValueFrom != nil { + { + size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n108, err := m.Resources.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n108 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n109, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n109 - } - if m.StorageClassName != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) - i += copy(dAtA[i:], *m.StorageClassName) - } - if m.VolumeMode != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i += copy(dAtA[i:], *m.VolumeMode) - } - if m.DataSource != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DataSource.Size())) - n110, err := m.DataSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n110 - } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { +func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.SecretKeyRef != nil { + { + size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if m.ConfigMapKeyRef != nil { + { + size, err := 
m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0x1a - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n111, err := (&v).MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + } + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n111 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) - i += copy(dAtA[i:], m.ClaimName) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainer) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n112, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n112 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.TargetContainerName) + copy(dAtA[i:], m.TargetContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetContainerName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.EphemeralContainerCommon.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) 
{ +func (m *EphemeralContainerCommon) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainerCommon) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GCEPersistentDisk != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n113, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n113 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n114, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - i += n114 } - if m.HostPath != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n115, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } - i += n115 } - if m.Glusterfs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n116, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n116 - } - if m.NFS != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n117, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n117 + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.RBD != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n118, err := m.RBD.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n118 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.ISCSI != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n119, err := m.ISCSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n119 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Cinder != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n120, err 
:= m.Cinder.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n120 + i-- + dAtA[i] = 0x7a } - if m.CephFS != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n121, err := m.CephFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n121 + i-- + dAtA[i] = 0x62 } - if m.FC != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n122, err := m.FC.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n122 - } - if m.Flocker != nil { + i-- dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n123, err := m.Flocker.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n123 } - if m.FlexVolume != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n124, err := m.FlexVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n124 - } - if m.AzureFile != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n125, err := m.AzureFile.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n125 + i-- + dAtA[i] = 0x52 } - if m.VsphereVolume != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n126, err := m.VsphereVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - i += n126 } - if m.Quobyte != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n127, err := m.Quobyte.MarshalTo(dAtA[i:]) + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n127 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AzureDisk != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n128, err := m.AzureDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - i += n128 } - if m.PhotonPersistentDisk != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n129, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n129 } - if m.PortworxVolume != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n130, err := m.PortworxVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 } - i += n130 } - if m.ScaleIO != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n131, err := m.ScaleIO.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a } - i += n131 } - if m.Local != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n132, err := m.Local.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n132 + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EphemeralContainers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.StorageOS != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n133, err := m.StorageOS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *EphemeralContainers) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n133 } - if m.CSI != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n134, err := m.CSI.MarshalTo(dAtA[i:]) + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n134 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { +func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x7a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x72 + if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n135, err := (&v).MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x6a + } + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x62 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n135 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n136, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n136 - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x52 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x4a + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x40 + { + size, err := m.LastTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ClaimRef != nil { - dAtA[i] = 0x22 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n137, err := m.ClaimRef.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x3a + { + size, err := m.FirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n137 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) - i += copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) + i-- dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) - i += copy(dAtA[i:], m.StorageClassName) - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.VolumeMode != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i += copy(dAtA[i:], *m.VolumeMode) + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.InvolvedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NodeAffinity != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n138, err := m.NodeAffinity.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n138 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { +func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - return i, nil + return len(dAtA) - i, nil } -func (m *PhotonPersistentDiskVolumeSource) 
Marshal() (dAtA []byte, err error) { +func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) - i += copy(dAtA[i:], m.PdID) + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *Pod) Marshal() (dAtA []byte, err error) { +func (m *EventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Pod) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n139, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n139 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n140, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n140 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n141, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n141 - return i, nil + i -= len(m.Component) + copy(dAtA[i:], m.Component) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAffinity) Marshal() (dAtA []byte, err error) { +func (m *ExecAction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + 
copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n } } - return i, nil + return len(dAtA) - i, nil } -func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { +func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.LabelSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n142, err := m.LabelSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.WWIDs) > 0 { + for iNdEx := len(m.WWIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.WWIDs[iNdEx]) + copy(dAtA[i:], m.WWIDs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WWIDs[iNdEx]))) + i-- + dAtA[i] = 0x2a } - i += n142 } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) - i += copy(dAtA[i:], m.TopologyKey) - return i, nil + if m.Lun != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) + i-- + dAtA[i] = 0x10 + } + if len(m.TargetWWNs) > 0 { + for iNdEx := len(m.TargetWWNs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetWWNs[iNdEx]) + copy(dAtA[i:], m.TargetWWNs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetWWNs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { +func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, 
len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { +func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x10 - i++ - if m.Stdout { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } } - i++ - dAtA[i] = 0x18 - i++ - if m.Stderr { + i-- + if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- dAtA[i] = 0x20 - i++ - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0x1a } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - return i, nil + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodCondition) Marshal() (dAtA []byte, err error) { +func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlockerVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) + i -= len(m.DatasetUUID) + copy(dAtA[i:], m.DatasetUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n143, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n143 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n144, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n144 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.DatasetName) + copy(dAtA[i:], m.DatasetName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { +func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Nameservers) > 0 { - for _, s := range m.Nameservers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Searches) > 0 { - for _, s := range m.Searches { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Options) > 0 { - for _, msg := range m.Options { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - 
n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PDName) + copy(dAtA[i:], m.PDName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { +func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int +func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitRepoVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int _ = l + i -= len(m.Directory) + copy(dAtA[i:], m.Directory) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i-- + dAtA[i] = 0x1a + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x12 + i -= len(m.Repository) + copy(dAtA[i:], m.Repository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) - i += copy(dAtA[i:], *m.Value) - } - return i, nil + return len(dAtA) - i, nil } -func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { +func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.EndpointsNamespace != nil { + i -= len(*m.EndpointsNamespace) + copy(dAtA[i:], *m.EndpointsNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) + i-- + dAtA[i] = 0x22 } - i++ - dAtA[i] = 0x10 - i++ - if m.Stdout { + i-- + if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- dAtA[i] = 0x18 - i++ - if m.Stderr { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i++ - dAtA[i] = 0x20 - i++ - if m.TTY { + return dAtA[:n], nil +} + +func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPGetAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.HTTPHeaders) > 0 { + for iNdEx := len(m.HTTPHeaders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HTTPHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.Scheme) + copy(dAtA[i:], m.Scheme) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) + i-- + dAtA[i] = 0x22 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodList) Marshal() (dAtA []byte, err error) { +func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n145, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *Handler) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n145 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + return dAtA[:n], nil +} + +func (m *Handler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Handler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { +func (m *HostAlias) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Hostnames) > 0 { + for iNdEx := len(m.Hostnames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hostnames[iNdEx]) + copy(dAtA[i:], m.Hostnames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostnames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - dAtA[i] = 0x10 - i++ - if m.Follow { + return len(dAtA) - i, nil +} + +func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPathVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Type != nil { + i -= len(*m.Type) + copy(dAtA[i:], *m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 + } + i-- + if m.SessionCHAPAuth { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x18 - i++ - if m.Previous { + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i-- + if m.DiscoveryCHAPAuth { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.SinceSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) - } - if m.SinceTime != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n146, err := m.SinceTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n146 } - dAtA[i] = 0x30 - i++ - if m.Timestamps { + i-- + if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.TailLines != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) - } - if m.LimitBytes != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) - } - return i, nil + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { +func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, num := range m.Ports { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(num)) + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 + } + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } 
+ i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a } } - return i, nil + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyToPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { +func (m *Lifecycle) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) - i += copy(dAtA[i:], m.ConditionType) - return i, nil + if m.PreStop != nil { + { + size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PostStart != nil { + { + size, err := m.PostStart.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { +func (m *LimitRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.SELinuxOptions != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n147, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n147 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RunAsUser != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RunAsNonRoot != nil { - dAtA[i] = 0x18 - i++ - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MaxLimitRequestRatio) > 0 { + keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) + for k := range m.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + for iNdEx := len(keysForMaxLimitRequestRatio) - 1; iNdEx >= 0; iNdEx-- { + v := m.MaxLimitRequestRatio[ResourceName(keysForMaxLimitRequestRatio[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMaxLimitRequestRatio[iNdEx]) + copy(dAtA[i:], keysForMaxLimitRequestRatio[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMaxLimitRequestRatio[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 } - i++ } - if len(m.SupplementalGroups) > 0 { - for _, num := range m.SupplementalGroups { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(num)) + if len(m.DefaultRequest) > 0 { + keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) + for k := range m.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + for iNdEx := len(keysForDefaultRequest) - 1; iNdEx >= 0; iNdEx-- { + v := m.DefaultRequest[ResourceName(keysForDefaultRequest[iNdEx])] + baseI := 
i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefaultRequest[iNdEx]) + copy(dAtA[i:], keysForDefaultRequest[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefaultRequest[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } } - if m.FSGroup != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) + if len(m.Default) > 0 { + keysForDefault := make([]string, 0, len(m.Default)) + for k := range m.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + for iNdEx := len(keysForDefault) - 1; iNdEx >= 0; iNdEx-- { + v := m.Default[ResourceName(keysForDefault[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefault[iNdEx]) + copy(dAtA[i:], keysForDefault[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefault[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } } - if m.RunAsGroup != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + if len(m.Min) > 0 { + keysForMin := make([]string, 0, len(m.Min)) + for k := range m.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + for iNdEx := len(keysForMin) - 1; iNdEx >= 0; iNdEx-- { + v := m.Min[ResourceName(keysForMin[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMin[iNdEx]) + copy(dAtA[i:], keysForMin[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMin[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } } - if len(m.Sysctls) > 0 { - for _, msg := range m.Sysctls { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Max) > 0 { + keysForMax := make([]string, 0, len(m.Max)) + for k := range m.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + for iNdEx := len(keysForMax) - 1; iNdEx >= 0; iNdEx-- { + v := m.Max[ResourceName(keysForMax[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + i -= len(keysForMax[iNdEx]) + copy(dAtA[i:], keysForMax[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMax[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSignature) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodController != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n148, err := m.PodController.MarshalTo(dAtA[i:]) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n148 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSpec) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Limits) > 0 { + for iNdEx := len(m.Limits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - if len(m.Containers) > 0 { - for _, msg := range m.Containers { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return len(dAtA) - i, nil +} + +func (m *List) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) - i += copy(dAtA[i:], m.RestartPolicy) - if m.TerminationGracePeriodSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) - i += copy(dAtA[i:], m.DNSPolicy) - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for _, k := range keysForNodeSelector { - dAtA[i] = 0x3a - i++ - v := 
m.NodeSelector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i += copy(dAtA[i:], m.ServiceAccountName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) - i += copy(dAtA[i:], m.DeprecatedServiceAccount) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - dAtA[i] = 0x58 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x60 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x68 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecurityContext != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n149, err := m.SecurityContext.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n149 - } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) - i += copy(dAtA[i:], m.Subdomain) - if m.Affinity != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n150, err := m.Affinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n150 - } - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) - i += copy(dAtA[i:], m.SchedulerName) - if len(m.InitContainers) > 0 { - for _, msg := range m.InitContainers { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AutomountServiceAccountToken != nil { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x1 - i++ - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Tolerations) > 0 { - for _, msg := range m.Tolerations { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.HostAliases) > 0 { - for _, msg := range m.HostAliases { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) - i += copy(dAtA[i:], m.PriorityClassName) - if m.Priority != nil { - dAtA[i] = 0xc8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) - } - if m.DNSConfig != nil { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - 
i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) - n151, err := m.DNSConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n151 - } - if m.ShareProcessNamespace != nil { - dAtA[i] = 0xd8 - i++ - dAtA[i] = 0x1 - i++ - if *m.ShareProcessNamespace { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.ReadinessGates) > 0 { - for _, msg := range m.ReadinessGates { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.RuntimeClassName != nil { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) - i += copy(dAtA[i:], *m.RuntimeClassName) - } - return i, nil + return dAtA[:n], nil } -func (m *PodStatus) Marshal() (dAtA []byte, err error) { +func (m *List) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) - i += copy(dAtA[i:], m.PodIP) - if m.StartTime != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n152, err := m.StartTime.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n152 - } - if len(m.ContainerStatuses) > 0 { - for _, msg := range m.ContainerStatuses { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) - i += copy(dAtA[i:], m.QOSClass) - if len(m.InitContainerStatuses) > 0 { - for _, msg := range m.InitContainerStatuses { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) - i += copy(dAtA[i:], m.NominatedNodeName) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m 
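For orientation, the regenerated marshalers in these hunks follow the new gogo/protobuf layout: Marshal allocates Size() bytes, MarshalTo delegates, and MarshalToSizedBuffer fills the buffer from the end toward the front, letting each embedded message write into dAtA[:i] before the parent prepends its length and tag. Below is a minimal standalone sketch of that shape, using hypothetical outerMsg/innerMsg types and a local encodeVarint helper rather than the vendored generated identifiers.

package main

import "fmt"

// sov returns the number of bytes needed to varint-encode x.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// encodeVarint writes v so it ends just below offset and returns the new offset,
// mirroring the shape of the generated encodeVarintGenerated helper.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

type innerMsg struct{ Name string } // field 1: string

func (m *innerMsg) Size() int { return 1 + len(m.Name) + sov(uint64(len(m.Name))) }

func (m *innerMsg) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i -= len(m.Name)
	copy(dAtA[i:], m.Name)
	i = encodeVarint(dAtA, i, uint64(len(m.Name)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}

type outerMsg struct{ Meta innerMsg } // field 1: embedded message

func (m *outerMsg) Size() int {
	l := m.Meta.Size()
	return 1 + l + sov(uint64(l))
}

func (m *outerMsg) Marshal() ([]byte, error) {
	size := m.Size()
	dAtA := make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *outerMsg) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *outerMsg) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	{
		// the child fills the space below i; the parent then records its size and tag
		size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarint(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}

func main() {
	out, _ := (&outerMsg{Meta: innerMsg{Name: "node-1"}}).Marshal()
	fmt.Printf("% x\n", out) // 0a 08 0a 06 6e 6f 64 65 2d 31
}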
*PodStatusResult) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n153, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n153 + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n154, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n154 - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodTemplate) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n155, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n155 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n156, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i += n156 - return i, nil + return len(dAtA) - i, nil } -func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n157, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n157 - if 
len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + return len(dAtA) - i, nil } -func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n158 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n159, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x12 } - i += n159 - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Server) + copy(dAtA[i:], m.Server) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Preconditions) Marshal() (dAtA []byte, err error) { +func (m *Namespace) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.UID != nil { - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { +func (m *NamespaceCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n160, err := m.PodSignature.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n160 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n161, err := m.EvictionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n161 - dAtA[i] = 0x1a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { +func (m *NamespaceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n162, err := m.Preference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + 
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n162 - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Probe) Marshal() (dAtA []byte, err error) { +func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Probe) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n163, err := m.Handler.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i += n163 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) - return i, nil + return len(dAtA) - i, nil } -func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Sources) > 0 { - for _, msg := range m.Sources { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - return i, nil + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *Node) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) - i += copy(dAtA[i:], m.Registry) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) - i += copy(dAtA[i:], m.Volume) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) - i += copy(dAtA[i:], m.RBDImage) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) - i += copy(dAtA[i:], m.RBDPool) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) - i += copy(dAtA[i:], m.RadosUser) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) - i += copy(dAtA[i:], m.Keyring) - if m.SecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n164, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n164 - } - dAtA[i] = 0x40 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return 
i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) - i += copy(dAtA[i:], m.RBDImage) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) - i += copy(dAtA[i:], m.RBDPool) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) - i += copy(dAtA[i:], m.RadosUser) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) - i += copy(dAtA[i:], m.Keyring) - if m.SecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n165, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n165 - } - dAtA[i] = 0x40 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0xa } - i++ - return i, nil + return len(dAtA) - i, nil } -func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { +func (m *NodeCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + 
dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n166 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) - i += copy(dAtA[i:], m.Range) - if m.Data != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastHeartbeatTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationController) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n167, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n167 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n168, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n168 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n169, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n169 - return i, nil + return len(dAtA) - i, nil } -func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n170, err := 
m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n170 + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + if m.LastKnownGood != nil { + { + size, err := m.LastKnownGood.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Active != nil { + { + size, err := m.Active.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Assigned != nil { + { + size, err := m.Assigned.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { +func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n171, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n171 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.KubeletEndpoint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { +func (m *NodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.Template != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n172, err := m.Template.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n172 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { +func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *NodeResources) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) - i += copy(dAtA[i:], m.ContainerName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n173, err := 
m.Divisor.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - i += n173 - return i, nil + return len(dAtA) - i, nil } -func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { +func (m *NodeSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n174, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n174 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n175, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n175 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n176, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.NodeSelectorTerms) > 0 { + for iNdEx := len(m.NodeSelectorTerms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NodeSelectorTerms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i += n176 - return i, nil + return len(dAtA) - i, nil } -func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { +func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n177, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n177 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 
1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { +func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hard) > 0 { - keysForHard := make([]string, 0, len(m.Hard)) - for k := range m.Hard { - keysForHard = append(keysForHard, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { - dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n178, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.MatchFields) > 0 { + for iNdEx := len(m.MatchFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n178 - } - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if m.ScopeSelector != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size())) - n179, err := m.ScopeSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n179 } - return i, nil + return len(dAtA) - i, nil } -func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hard) > 0 { - keysForHard := make([]string, 0, len(m.Hard)) - for k := range m.Hard { - keysForHard = append(keysForHard, string(k)) + if len(m.PodCIDRs) > 0 { + for iNdEx := len(m.PodCIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PodCIDRs[iNdEx]) + copy(dAtA[i:], m.PodCIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDRs[iNdEx]))) + i-- + dAtA[i] = 0x3a } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { - dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n180, err := (&v).MarshalTo(dAtA[i:]) + } + if m.ConfigSource != nil { + { + size, err := m.ConfigSource.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n180 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } - if len(m.Used) > 0 { - keysForUsed := make([]string, 0, len(m.Used)) - for k := range m.Used { - keysForUsed = append(keysForUsed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - for _, k := range keysForUsed { - dAtA[i] = 0x12 - i++ - v := m.Used[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n181, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n181 + i-- + dAtA[i] = 0x2a } } - return i, nil + i-- + if m.Unschedulable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.ProviderID) + copy(dAtA[i:], m.ProviderID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) + i-- + dAtA[i] = 0x1a + i -= len(m.DoNotUseExternalID) + copy(dAtA[i:], m.DoNotUseExternalID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUseExternalID))) + i-- + dAtA[i] = 0x12 + i -= len(m.PodCIDR) + copy(dAtA[i:], m.PodCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if 
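Scalar and string fields follow the same reversed discipline, as in the NodeSpec hunk above: the value bytes are copied first, then the length varint, then the tag, and booleans are emitted as a single byte before their tag byte. The following self-contained sketch shows that ordering with illustrative toySpec/encodeVarint names, loosely modeled on the PodCIDR/Unschedulable pair; it is not the vendored code itself.

package main

import "fmt"

func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// toySpec is a hypothetical message: field 1 a string, field 4 a bool.
type toySpec struct {
	PodCIDR       string
	Unschedulable bool
}

func (m *toySpec) size() int {
	return 1 + len(m.PodCIDR) + sov(uint64(len(m.PodCIDR))) + // field 1
		2 // field 4: tag byte + bool byte
}

func (m *toySpec) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	// field 4 (bool): value byte first, tag byte after, because we write backwards
	i--
	if m.Unschedulable {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x20 // field 4, wire type 0 (varint)
	// field 1 (string): bytes, then length varint, then tag
	i -= len(m.PodCIDR)
	copy(dAtA[i:], m.PodCIDR)
	i = encodeVarint(dAtA, i, uint64(len(m.PodCIDR)))
	i--
	dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
	return len(dAtA) - i, nil
}

func main() {
	m := &toySpec{PodCIDR: "10.0.0.0/24", Unschedulable: true}
	buf := make([]byte, m.size())
	n, _ := m.marshalToSizedBuffer(buf)
	fmt.Printf("%d bytes: % x\n", n, buf[len(buf)-n:])
}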
len(m.Limits) > 0 { - keysForLimits := make([]string, 0, len(m.Limits)) - for k := range m.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - for _, k := range keysForLimits { - dAtA[i] = 0xa - i++ - v := m.Limits[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n182, err := (&v).MarshalTo(dAtA[i:]) + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n182 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - if len(m.Requests) > 0 { - keysForRequests := make([]string, 0, len(m.Requests)) - for k := range m.Requests { - keysForRequests = append(keysForRequests, string(k)) + if len(m.VolumesAttached) > 0 { + for iNdEx := len(m.VolumesAttached) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumesAttached[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - for _, k := range keysForRequests { + } + if len(m.VolumesInUse) > 0 { + for iNdEx := len(m.VolumesInUse) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumesInUse[iNdEx]) + copy(dAtA[i:], m.VolumesInUse[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumesInUse[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.DaemonEndpoints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0x1a + if len(m.Allocatable) > 0 { + keysForAllocatable := make([]string, 0, len(m.Allocatable)) + for k := range m.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- { + v := 
m.Allocatable[ResourceName(keysForAllocatable[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - v := m.Requests[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAllocatable[iNdEx]) + copy(dAtA[i:], keysForAllocatable[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatable[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n183, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n183 + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { +func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) + i -= len(m.Architecture) + copy(dAtA[i:], m.Architecture) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i-- + dAtA[i] = 0x52 + i -= len(m.OperatingSystem) + copy(dAtA[i:], m.OperatingSystem) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) + i-- + dAtA[i] = 0x4a + i -= len(m.KubeProxyVersion) + copy(dAtA[i:], m.KubeProxyVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) + i-- + dAtA[i] = 0x42 + i -= len(m.KubeletVersion) + copy(dAtA[i:], m.KubeletVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) + i-- + dAtA[i] = 0x3a + i -= len(m.ContainerRuntimeVersion) + copy(dAtA[i:], m.ContainerRuntimeVersion) + i = 
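Repeated and map fields in the regenerated code, such as NodeStatus.Allocatable and Capacity above, iterate slices and sorted key lists from the last element down so the forward wire order is preserved, and each map entry records baseI before writing the value and key so that baseI-i can be varint-encoded as the entry length. Here is a standalone sketch of both loops, again with hypothetical names rather than the vendored ones.

package main

import (
	"fmt"
	"sort"
)

func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// toyStatus is a hypothetical message: field 1 repeated string, field 2 map<string,string>.
type toyStatus struct {
	Phases   []string
	Capacity map[string]string
}

func (m *toyStatus) size() int {
	n := 0
	for _, s := range m.Phases {
		n += 1 + len(s) + sov(uint64(len(s)))
	}
	for k, v := range m.Capacity {
		entry := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v)))
		n += 1 + entry + sov(uint64(entry))
	}
	return n
}

func (m *toyStatus) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if len(m.Capacity) > 0 {
		keys := make([]string, 0, len(m.Capacity))
		for k := range m.Capacity {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		// walk sorted keys backwards so entries land in ascending order on the wire
		for iNdEx := len(keys) - 1; iNdEx >= 0; iNdEx-- {
			v := m.Capacity[keys[iNdEx]]
			baseI := i
			// entry value (field 2), then entry key (field 1), both written backwards
			i -= len(v)
			copy(dAtA[i:], v)
			i = encodeVarint(dAtA, i, uint64(len(v)))
			i--
			dAtA[i] = 0x12
			i -= len(keys[iNdEx])
			copy(dAtA[i:], keys[iNdEx])
			i = encodeVarint(dAtA, i, uint64(len(keys[iNdEx])))
			i--
			dAtA[i] = 0xa
			// frame the whole entry with its length (baseI-i) and the outer tag
			i = encodeVarint(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x12
		}
	}
	for iNdEx := len(m.Phases) - 1; iNdEx >= 0; iNdEx-- {
		i -= len(m.Phases[iNdEx])
		copy(dAtA[i:], m.Phases[iNdEx])
		i = encodeVarint(dAtA, i, uint64(len(m.Phases[iNdEx])))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func main() {
	m := &toyStatus{
		Phases:   []string{"Pending", "Running"},
		Capacity: map[string]string{"cpu": "4", "memory": "8Gi"},
	}
	buf := make([]byte, m.size())
	n, _ := m.marshalToSizedBuffer(buf)
	fmt.Printf("encoded %d bytes: % x\n", n, buf[len(buf)-n:])
}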
encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.OSImage) + copy(dAtA[i:], m.OSImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) + i-- + dAtA[i] = 0x2a + i -= len(m.KernelVersion) + copy(dAtA[i:], m.KernelVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) - return i, nil + i -= len(m.BootID) + copy(dAtA[i:], m.BootID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) + i-- + dAtA[i] = 0x1a + i -= len(m.SystemUUID) + copy(dAtA[i:], m.SystemUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) + i-- + dAtA[i] = 0x12 + i -= len(m.MachineID) + copy(dAtA[i:], m.MachineID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n184, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n184 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i 
int +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n185, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n185 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA 
[]byte) (int, error) { - var i int +func (m *PersistentVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) - i += copy(dAtA[i:], m.ScopeName) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Secret) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Secret) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n186, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n186 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.StringData) > 0 { - keysForStringData := make([]string, 0, len(m.StringData)) - for k := range m.StringData { - keysForStringData = append(keysForStringData, string(k)) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - for _, k := range keysForStringData { - dAtA[i] = 0x22 - i++ - v := m.StringData[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = 
encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n187, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n187 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n188, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n188 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretList) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return 
dAtA[:n], nil } -func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n189, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n189 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *SecretProjection) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n190, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n190 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretReference) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil -} - -func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, 
err := msg.MarshalTo(dAtA[i:]) + if m.DataSource != nil { + { + size, err := m.DataSource.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x3a } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + if m.VolumeMode != nil { + i -= len(*m.VolumeMode) + copy(dAtA[i:], *m.VolumeMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i-- + dAtA[i] = 0x32 } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.StorageClassName != nil { + i -= len(*m.StorageClassName) + copy(dAtA[i:], *m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i-- + dAtA[i] = 0x2a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0xa } - i++ } - return i, nil + return len(dAtA) - i, nil } -func (m *SecurityContext) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Capabilities != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n191, err := m.Capabilities.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n191 - } - if m.Privileged != nil { - dAtA[i] = 0x10 - i++ - if *m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.SELinuxOptions != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n192, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n192 - } - if m.RunAsUser != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) } - if m.RunAsNonRoot != nil { - dAtA[i] = 0x28 - i++ - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k 
:= range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) } - i++ - } - if m.ReadOnlyRootFilesystem != nil { - dAtA[i] = 0x30 - i++ - if *m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } - i++ } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x38 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i++ - } - if m.RunAsGroup != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - } - if m.ProcMount != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) - i += copy(dAtA[i:], *m.ProcMount) } - return i, nil + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SerializedReference) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n193, err := m.Reference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n193 - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.ClaimName) + copy(dAtA[i:], m.ClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Service) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Service) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ObjectMeta.Size())) - n194, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n194 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n195, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n195 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n196, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n196 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n197, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n197 - if len(m.Secrets) > 0 { - for _, msg := range m.Secrets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - if m.AutomountServiceAccountToken != nil { - dAtA[i] = 0x20 - i++ - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Local != nil { + { + size, err := m.Local.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - return i, nil -} - -func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } - return dAtA[:n], nil -} - -func (m *ServiceAccountList) MarshalTo(dAtA 
[]byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n198, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } - i += n198 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - return i, nil -} - -func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } - return dAtA[:n], nil -} - -func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) - i += copy(dAtA[i:], m.Audience) - if m.ExpirationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil -} - -func (m *ServiceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 } - return dAtA[:n], nil -} - -func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n199, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } - i += n199 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.FlexVolume != nil { + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x62 } - return i, nil -} - -func (m *ServicePort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, 
size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - return dAtA[:n], nil -} - -func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n200, err := m.TargetPort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - i += n200 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) - return i, nil + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Cinder != nil { + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceProxyOptions) 
MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil -} - -func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x4a } - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + if m.VolumeMode != nil { + i -= len(*m.VolumeMode) + copy(dAtA[i:], *m.VolumeMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) - i += copy(dAtA[i:], m.ClusterIP) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x3a } } - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) - i += copy(dAtA[i:], m.SessionAffinity) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) - i += copy(dAtA[i:], m.LoadBalancerIP) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - dAtA[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i -= len(m.StorageClassName) + copy(dAtA[i:], m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) + i-- + dAtA[i] = 0x32 + i -= len(m.PersistentVolumeReclaimPolicy) + copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i-- + dAtA[i] = 0x2a + if m.ClaimRef != nil { 
+ { + size, err := m.ClaimRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) - i += copy(dAtA[i:], m.ExternalName) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) - i += copy(dAtA[i:], m.ExternalTrafficPolicy) - dAtA[i] = 0x60 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) - dAtA[i] = 0x68 - i++ - if m.PublishNotReadyAddresses { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - i++ - if m.SessionAffinityConfig != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n201, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) + { + size, err := m.PersistentVolumeSource.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n201 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } -func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n202, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n202 - return i, nil + return len(dAtA) - i, nil } -func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { 
+func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ClientIP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n203, err := m.ClientIP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n203 - } - return i, nil + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PdID) + copy(dAtA[i:], m.PdID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *Pod) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Pod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i += copy(dAtA[i:], m.VolumeNamespace) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n204, err := m.SecretRef.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n204 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinity) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i += copy(dAtA[i:], m.VolumeNamespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n205, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n205 } - return i, nil + return len(dAtA) - i, nil } -func (m *Sysctl) Marshal() (dAtA []byte, err error) { +func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - return i, nil -} - -func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { +func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n206, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x1a + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - i += n206 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *Taint) Marshal() (dAtA []byte, err error) { +func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Taint) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i += copy(dAtA[i:], m.Effect) - if m.TimeAdded != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n207, err := m.TimeAdded.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n207 } - return i, nil + return len(dAtA) - i, nil } -func (m *Toleration) Marshal() (dAtA []byte, err error) { +func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i += copy(dAtA[i:], m.Effect) - if m.TolerationSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + 
i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { +func (m *PodCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, msg := range m.MatchLabelExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a + } + } + if len(m.Searches) > 0 { + for iNdEx := len(m.Searches) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Searches[iNdEx]) + copy(dAtA[i:], 
m.Searches[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Searches[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return i, nil + if len(m.Nameservers) > 0 { + for iNdEx := len(m.Nameservers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Nameservers[iNdEx]) + copy(dAtA[i:], m.Nameservers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Nameservers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfigOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.APIGroup != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) - i += copy(dAtA[i:], *m.APIGroup) + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Volume) Marshal() (dAtA []byte, err error) { +func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Volume) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n208, err := m.VolumeSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n208 - return i, nil + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { +func (m *PodIP) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i +func (m *PodIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i += copy(dAtA[i:], m.DevicePath) - return i, nil + return len(dAtA) - i, nil } -func (m *VolumeMount) Marshal() (dAtA []byte, err error) { +func (m *PodList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { + return len(dAtA) - i, nil +} + +func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.InsecureSkipTLSVerifyBackend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) - i += copy(dAtA[i:], m.MountPath) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) - i += copy(dAtA[i:], m.SubPath) - if m.MountPropagation != nil { + i-- + dAtA[i] = 0x48 + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 + } + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 + } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) - i += 
copy(dAtA[i:], *m.MountPropagation) } - return i, nil + if m.SinceSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 + } + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { +func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPortForwardOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Required != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size())) - n209, err := m.Required.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x8 } - i += n209 } - return i, nil + return len(dAtA) - i, nil } -func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { +func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Secret != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n210, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n210 - } - if m.DownwardAPI != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n211, err := m.DownwardAPI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n211 - } - if m.ConfigMap != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n212, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n212 - } - if m.ServiceAccountToken != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) - n213, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n213 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return i, nil + return dAtA[:n], nil } -func (m *VolumeSource) Marshal() (dAtA []byte, err 
error) { +func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ConditionType) + copy(dAtA[i:], m.ConditionType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HostPath != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n214, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n214 + i-- + dAtA[i] = 0x42 } - if m.EmptyDir != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n215, err := m.EmptyDir.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Sysctls) > 0 { + for iNdEx := len(m.Sysctls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sysctls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - i += n215 } - if m.GCEPersistentDisk != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n216, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n216 + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x30 } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n217, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n217 + if m.FSGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) + i-- + dAtA[i] = 0x28 } - if m.GitRepo != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n218, err := m.GitRepo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x20 } - i += n218 } - if m.Secret != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n219, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n219 + i-- + dAtA[i] = 0x18 } - if m.NFS != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n220, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += 
n220 + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x10 } - if m.ISCSI != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n221, err := m.ISCSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n221 + i-- + dAtA[i] = 0xa } - if m.Glusterfs != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n222, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n222 + return len(dAtA) - i, nil +} + +func (m *PodSignature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.PersistentVolumeClaim != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n223, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PodController != nil { + { + size, err := m.PodController.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n223 + i-- + dAtA[i] = 0xa } - if m.RBD != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n224, err := m.RBD.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n224 + return len(dAtA) - i, nil +} + +func (m *PodSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.FlexVolume != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n225, err := m.FlexVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 } - i += n225 } - if m.Cinder != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n226, err := m.Cinder.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.TopologySpreadConstraints) > 0 { + for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a } - i += n226 } - if m.CephFS != nil { - dAtA[i] = 0x72 - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n227, err := m.CephFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Overhead) > 0 { + keysForOverhead := make([]string, 0, len(m.Overhead)) + for k := range m.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) } - i += n227 + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + for iNdEx := len(keysForOverhead) - 1; iNdEx >= 0; iNdEx-- { + v := m.Overhead[ResourceName(keysForOverhead[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForOverhead[iNdEx]) + copy(dAtA[i:], keysForOverhead[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOverhead[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa } - if m.Flocker != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n228, err := m.Flocker.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.EnableServiceLinks != nil { + i-- + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n228 - } - if m.DownwardAPI != nil { - dAtA[i] = 0x82 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n229, err := m.DownwardAPI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n229 + i-- + dAtA[i] = 0xf0 } - if m.FC != nil { - dAtA[i] = 0x8a - i++ + if m.RuntimeClassName != nil { + i -= len(*m.RuntimeClassName) + copy(dAtA[i:], *m.RuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n230, err := m.FC.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n230 + i-- + dAtA[i] = 0xea } - if m.AzureFile != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n231, err := m.AzureFile.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ReadinessGates) > 0 { + for iNdEx := len(m.ReadinessGates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReadinessGates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 } - i += n231 } - if m.ConfigMap != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n232, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ShareProcessNamespace != nil { + i-- + if *m.ShareProcessNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n232 - } - if m.VsphereVolume != nil { - dAtA[i] = 0xa2 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n233, err := m.VsphereVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n233 + i-- + dAtA[i] = 0xd8 } - if m.Quobyte != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n234, err := m.Quobyte.MarshalTo(dAtA[i:]) - if 
err != nil { - return 0, err + if m.DNSConfig != nil { + { + size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n234 - } - if m.AzureDisk != nil { - dAtA[i] = 0xb2 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n235, err := m.AzureDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n235 + i-- + dAtA[i] = 0xd2 } - if m.PhotonPersistentDisk != nil { - dAtA[i] = 0xba - i++ + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n236, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n236 + i-- + dAtA[i] = 0xc8 } - if m.PortworxVolume != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n237, err := m.PortworxVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.PriorityClassName) + copy(dAtA[i:], m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + if len(m.HostAliases) > 0 { + for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - i += n237 } - if m.ScaleIO != nil { - dAtA[i] = 0xca - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n238, err := m.ScaleIO.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - i += n238 } - if m.Projected != nil { - dAtA[i] = 0xd2 - i++ + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n239, err := m.Projected.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0xa8 + } + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - i += n239 } - if m.StorageOS != nil { - dAtA[i] = 0xda - i++ + i -= len(m.SchedulerName) + copy(dAtA[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n240, err := m.StorageOS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x92 + } + i -= len(m.Subdomain) + copy(dAtA[i:], m.Subdomain) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a } - i += n240 } - return i, nil -} - -func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 } - return dAtA[:n], nil -} - -func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) - i += copy(dAtA[i:], m.VolumePath) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) - i += copy(dAtA[i:], m.StoragePolicyName) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) - i += copy(dAtA[i:], m.StoragePolicyID) - return i, nil -} - -func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return dAtA[:n], nil -} - -func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n241, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x68 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n241 - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ + i-- + dAtA[i] = 0x60 + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + 
sovGenerated(uint64(m.Partition)) - n += 2 - return n -} - -func (m *Affinity) Size() (n int) { - var l int - _ = l - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x58 + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x52 + i -= len(m.DeprecatedServiceAccount) + copy(dAtA[i:], m.DeprecatedServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i-- + dAtA[i] = 0x4a + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x42 + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } } - if m.PodAffinity != nil { - l = m.PodAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.DNSPolicy) + copy(dAtA[i:], m.DNSPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) + i-- + dAtA[i] = 0x32 + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x28 } - if m.PodAntiAffinity != nil { - l = m.PodAntiAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + i-- + dAtA[i] = 0x20 } - return n + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x1a + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *AttachedVolume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *AvoidPods) Size() (n int) { - var l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, e := range m.PreferAvoidPods { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *PodStatus) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AzureDiskVolumeSource) Size() (n int) { +func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.DiskName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DataDiskURI) - n += 1 + l + sovGenerated(uint64(l)) - if m.CachingMode != nil { - l = len(*m.CachingMode) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainerStatuses) > 0 { + for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.PodIPs) > 0 { + for iNdEx := len(m.PodIPs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PodIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } } - if m.ReadOnly != nil { - n += 2 + i -= len(m.NominatedNodeName) + copy(dAtA[i:], m.NominatedNodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) + i-- + dAtA[i] = 0x5a + if len(m.InitContainerStatuses) > 0 { + for iNdEx := len(m.InitContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.QOSClass) + copy(dAtA[i:], m.QOSClass) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) + i-- + dAtA[i] = 0x4a + if len(m.ContainerStatuses) > 0 { + for iNdEx := len(m.ContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } } - return n -} - -func (m *AzureFilePersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretNamespace != nil { - l = len(*m.SecretNamespace) - n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - return n + i -= len(m.PodIP) + copy(dAtA[i:], m.PodIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i-- + dAtA[i] = 0x32 + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *AzureFileVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Binding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CSIPersistentVolumeSource) Size() (n int) { +func (m *PodStatusResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeHandle) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ControllerPublishSecretRef != nil { - l = m.ControllerPublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NodeStageSecretRef != nil { - l = m.NodeStageSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Capabilities) Size() (n int) { +func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CephFSPersistentVolumeSource) Size() (n int) { +func (m 
*PodTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - n += 2 - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CephFSVolumeSource) Size() (n int) { +func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - n += 2 - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CinderPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *CinderVolumeSource) Size() (n int) { +func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortworxVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.VolumeID) - n += 
1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ClientIPConfig) Size() (n int) { - var l int - _ = l - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ComponentCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ComponentStatus) Size() (n int) { +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ComponentStatusList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMap) Size() (n int) { +func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreferAvoidPodsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.EvictionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.BinaryData) > 0 { - for k, v := range m.BinaryData { - _ = k - _ = v - l = 0 - if v != nil { - l = 
1 + len(v) + sovGenerated(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x12 + { + size, err := m.PodSignature.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ConfigMapEnvSource) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 +func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMapKeySelector) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } - return n +func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ConfigMapList) Size() (n int) { +func (m *PreferredSchedulingTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Preference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ConfigMapNodeConfigSource) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletConfigKey) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Probe) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ConfigMapProjection) Size() (n int) { +func (m *Probe) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) + i-- + dAtA[i] = 0x10 + { + size, err := m.Handler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if 
m.Optional != nil { - n += 2 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMapVolumeSource) Size() (n int) { +func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectedVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 } - if m.Optional != nil { - n += 2 + if len(m.Sources) > 0 { + for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - return n + return len(dAtA) - i, nil } -func (m *Container) Size() (n int) { +func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuobyteVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x32 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x2a + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x18 + i -= len(m.Volume) + copy(dAtA[i:], m.Volume) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) + i-- + dAtA[i] = 0x12 + i -= len(m.Registry) + copy(dAtA[i:], m.Registry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.WorkingDir) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + return dAtA[:n], nil +} + +func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*RBDPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x40 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x3a } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Keyring) + copy(dAtA[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i-- + dAtA[i] = 0x32 + i -= len(m.RadosUser) + copy(dAtA[i:], m.RadosUser) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i-- + dAtA[i] = 0x2a + i -= len(m.RBDPool) + copy(dAtA[i:], m.RBDPool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i-- + dAtA[i] = 0x22 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.RBDImage) + copy(dAtA[i:], m.RBDImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i-- + dAtA[i] = 0x12 + if len(m.CephMonitors) > 0 { + for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CephMonitors[iNdEx]) + copy(dAtA[i:], m.CephMonitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx]))) + i-- + dAtA[i] = 0xa } } - if m.LivenessProbe != nil { - l = m.LivenessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReadinessProbe != nil { - l = m.ReadinessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Lifecycle != nil { - l = m.Lifecycle.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.TerminationMessagePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RBDVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - n += 3 - n += 3 - n += 3 - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x40 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x3a } - l = len(m.TerminationMessagePolicy) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.VolumeDevices) > 0 { - for _, e := range m.VolumeDevices { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + i -= len(m.Keyring) + copy(dAtA[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i-- + dAtA[i] = 0x32 + i -= len(m.RadosUser) + copy(dAtA[i:], m.RadosUser) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i-- + dAtA[i] = 0x2a + i -= len(m.RBDPool) + copy(dAtA[i:], m.RBDPool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i-- + dAtA[i] = 0x22 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.RBDImage) + copy(dAtA[i:], m.RBDImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i-- + dAtA[i] = 0x12 + if len(m.CephMonitors) > 0 { + for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CephMonitors[iNdEx]) + copy(dAtA[i:], m.CephMonitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *ContainerImage) Size() (n int) { - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 1 + sovGenerated(uint64(m.SizeBytes)) - return n + return dAtA[:n], nil } -func (m *ContainerPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HostPort)) - n += 1 + sovGenerated(uint64(m.ContainerPort)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ContainerState) Size() (n int) { +func (m *RangeAllocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Waiting != nil { - l = m.Waiting.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Running != nil { - l = m.Running.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a } - if m.Terminated != nil { - l = m.Terminated.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Range) + copy(dAtA[i:], m.Range) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ContainerStateRunning) Size() (n int) { - var l int - _ = l - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ReplicationController) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ContainerStateTerminated) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ExitCode)) - n += 1 + sovGenerated(uint64(m.Signal)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FinishedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ReplicationController) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ContainerStateWaiting) Size() (n int) { +func (m *ReplicationController) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ContainerStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.State.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTerminationState.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 1 + sovGenerated(uint64(m.RestartCount)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImageID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DaemonEndpoint) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Port)) - return n +func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DownwardAPIProjection) Size() (n int) { +func (m *ReplicationControllerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *DownwardAPIVolumeFile) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) +func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *DownwardAPIVolumeSource) Size() (n int) { +func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *EmptyDirVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Medium) - n += 1 + l + sovGenerated(uint64(l)) - if m.SizeLimit != nil { - l = m.SizeLimit.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *EndpointAddress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if m.TargetRef != nil { - l = m.TargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - if m.NodeName != nil { - l = len(*m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EndpointPort) Size() (n int) { +func (m *ReplicationControllerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EndpointSubset) Size() (n int) { - var l int - _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.NotReadyAddresses) > 0 { - for _, e := range m.NotReadyAddresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) } - } - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - return n + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *Endpoints) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subsets) > 0 { - for _, e := range m.Subsets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *EndpointsList) Size() (n int) { +func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - return n + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *EnvFromSource) Size() (n int) { - var l int - _ = l - l = len(m.Prefix) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConfigMapRef != nil { - l = m.ConfigMapRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *EnvVar) Size() (n int) { +func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if m.ValueFrom != nil { - l = m.ValueFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Divisor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *EnvVarSource) Size() (n int) { +func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ConfigMapKeyRef != nil { - l = m.ConfigMapKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.SecretKeyRef != nil { - l = m.SecretKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Event) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.InvolvedObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FirstTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Count)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = m.EventTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Series != nil { - l = m.Series.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Action) - n += 1 + l + sovGenerated(uint64(l)) - if m.Related != nil { - l = m.Related.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.ReportingController) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ReportingInstance) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *EventList) Size() (n int) { +func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *EventSeries) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Count)) - l = m.LastObservedTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.State) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *EventSource) Size() (n int) { - var l int - _ = l - l = len(m.Component) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ExecAction) Size() (n int) { +func (m *ResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.ScopeSelector != nil { + { + size, err := m.ScopeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - return n -} - -func (m *FCVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if m.Lun != nil { - n += 1 + sovGenerated(uint64(*m.Lun)) - } - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hard[ResourceName(keysForHard[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHard[iNdEx]) + copy(dAtA[i:], keysForHard[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *FlexPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } +func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *FlexVolumeSource) Size() (n int) { +func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Used) > 0 { + keysForUsed := make([]string, 0, len(m.Used)) + for k := range m.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + for iNdEx := len(keysForUsed) - 1; iNdEx >= 0; iNdEx-- { + v := m.Used[ResourceName(keysForUsed[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsed[iNdEx]) + copy(dAtA[i:], keysForUsed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hard[ResourceName(keysForHard[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHard[iNdEx]) + copy(dAtA[i:], keysForHard[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *FlockerVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.DatasetName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DatasetUUID) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *GCEPersistentDiskVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.PDName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GitRepoVolumeSource) Size() (n int) 
{ +func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Repository) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Revision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Directory) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { + v := m.Requests[ResourceName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- { + v := m.Limits[ResourceName(keysForLimits[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLimits[iNdEx]) + copy(dAtA[i:], keysForLimits[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *GlusterfsVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *HTTPGetAction) Size() (n int) { +func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Scheme) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.HTTPHeaders) > 0 { - for _, e := range m.HTTPHeaders { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0x22 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + 
+func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *HTTPHeader) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Handler) Size() (n int) { +func (m *ScaleIOPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.HTTPGet != nil { - l = m.HTTPGet.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x50 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x4a + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x42 + i -= len(m.StorageMode) + copy(dAtA[i:], m.StorageMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i-- + dAtA[i] = 0x3a + i -= len(m.StoragePool) + copy(dAtA[i:], m.StoragePool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i-- + dAtA[i] = 0x32 + i -= len(m.ProtectionDomain) + copy(dAtA[i:], m.ProtectionDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i-- + dAtA[i] = 0x2a + i-- + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.TCPSocket != nil { - l = m.TCPSocket.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - return n + i -= len(m.System) + copy(dAtA[i:], m.System) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i-- + dAtA[i] = 0x12 + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HostAlias) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *HostPathVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Type != nil { - l = len(*m.Type) - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ISCSIPersistentVolumeSource) Size() (n int) { +func (m *ScaleIOVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n 
+= 1 + sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Portals) > 0 { - for _, s := range m.Portals { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - n += 2 + i-- + dAtA[i] = 0x50 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x4a + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x42 + i -= len(m.StorageMode) + copy(dAtA[i:], m.StorageMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i-- + dAtA[i] = 0x3a + i -= len(m.StoragePool) + copy(dAtA[i:], m.StoragePool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i-- + dAtA[i] = 0x32 + i -= len(m.ProtectionDomain) + copy(dAtA[i:], m.ProtectionDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i-- + dAtA[i] = 0x2a + i-- + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - n += 2 - if m.InitiatorName != nil { - l = len(*m.InitiatorName) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.System) + copy(dAtA[i:], m.System) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i-- + dAtA[i] = 0x12 + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ISCSIVolumeSource) Size() (n int) { +func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Portals) > 0 { - for _, s := range m.Portals { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.InitiatorName != nil { - l = len(*m.InitiatorName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + return len(dAtA) - i, nil } -func (m *KeyToPath) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if 
m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) +func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Lifecycle) Size() (n int) { +func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopedResourceSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.PostStart != nil { - l = m.PostStart.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - if m.PreStop != nil { - l = m.PreStop.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.ScopeName) + copy(dAtA[i:], m.ScopeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LimitRange) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LimitRangeItem) Size() (n int) { +func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Max) > 0 { - for k, v := range m.Max { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.StringData) > 0 { + keysForStringData := make([]string, 0, len(m.StringData)) + for k := range m.StringData { + keysForStringData = append(keysForStringData, string(k)) } - } - if len(m.Min) > 0 { - for k, v := range m.Min { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + for iNdEx := len(keysForStringData) - 1; iNdEx >= 0; iNdEx-- { + v := m.StringData[string(keysForStringData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForStringData[iNdEx]) + copy(dAtA[i:], keysForStringData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStringData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - if len(m.Default) > 0 { - for k, v := range m.Default { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) } - } - if len(m.DefaultRequest) > 0 { - for k, v := range m.DefaultRequest { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if len(m.MaxLimitRequestRatio) > 0 { - for k, v := range m.MaxLimitRequestRatio { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRangeList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LimitRangeSpec) Size() (n int) { +func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Limits) > 0 { - for _, e := range m.Limits { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - return n + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *List) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LoadBalancerIngress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LoadBalancerStatus) Size() (n int) { +func (m *SecretKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - return n + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LocalObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *LocalVolumeSource) Size() (n int) { +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NFSVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Server) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *SecretProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Namespace) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamespaceList) Size() (n int) { +func (m *SecretProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n -} - -func (m *NamespaceSpec) Size() (n int) { - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Node) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeAddress) Size() (n int) { +func (m *SecretReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Address) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeAffinity) Size() (n int) { - var l int - _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NodeCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastHeartbeatTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeConfigSource) Size() (n int) { +func (m *SecretVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - return n -} - -func (m *NodeConfigStatus) Size() (n int) { - var l int - _ = l - if m.Assigned != nil { 
- l = m.Assigned.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 } - if m.Active != nil { - l = m.Active.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if m.LastKnownGood != nil { - l = m.LastKnownGood.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *NodeDaemonEndpoints) Size() (n int) { - var l int - _ = l - l = m.KubeletEndpoint.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeList) Size() (n int) { +func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - return n + if m.ProcMount != nil { + i -= len(*m.ProcMount) + copy(dAtA[i:], *m.ProcMount) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) + i-- + dAtA[i] = 0x4a + } + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x40 + } + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.ReadOnlyRootFilesystem != nil { + i-- + if *m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x20 + } + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Privileged != nil { + i-- + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Capabilities != nil { + { + size, err := m.Capabilities.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *NodeProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SerializedReference) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *NodeResources) Size() (n int) { - var l int - _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n +func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeSelector) Size() (n int) { +func (m *SerializedReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, e := range m.NodeSelectorTerms { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NodeSelectorTerm) Size() (n int) { +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.MatchFields) > 0 { - for _, e := range m.MatchFields { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n -} - -func (m *NodeSpec) Size() (n int) { - var l int - _ = l - l = len(m.PodCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DoNotUse_ExternalID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ProviderID) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Taints) > 0 { - for _, e := range m.Taints { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ConfigSource != nil { - l = m.ConfigSource.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NodeStatus) Size() (n 
int) { +func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Allocatable) > 0 { - for k, v := range m.Allocatable { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - l = m.DaemonEndpoints.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.NodeInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if len(m.VolumesAttached) > 0 { - for _, e := range m.VolumesAttached { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Config != nil { - l = m.Config.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NodeSystemInfo) Size() (n int) { - var l int - _ = l - l = len(m.MachineID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SystemUUID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.BootID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KernelVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OSImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerRuntimeVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeProxyVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OperatingSystem) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Architecture) - n += 1 + l + 
sovGenerated(uint64(l)) - return n +func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ObjectFieldSelector) Size() (n int) { +func (m *ServiceAccountList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolume) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PersistentVolumeClaim) Size() (n int) { +func (m *ServiceAccountTokenProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolumeClaimList) Size() (n int) { +func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n -} - -func (m *PersistentVolumeClaimSpec) Size() (n int) { - var l int - _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.StorageClassName != nil { - l = len(*m.StorageClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VolumeMode != nil { - l = len(*m.VolumeMode) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DataSource != nil { - l = m.DataSource.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServicePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeClaimStatus) Size() (n int) { +func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) + i-- + dAtA[i] = 0x28 + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.ClaimName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PersistentVolumeList) Size() (n int) { +func (m *ServiceProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeSource) Size() (n int) { +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FC != nil { - l = m.FC.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) + if len(m.TopologyKeys) > 0 { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } } - if m.Local != nil { - l = 
m.Local.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.IPFamily != nil { + i -= len(*m.IPFamily) + copy(dAtA[i:], *m.IPFamily) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) + i-- + dAtA[i] = 0x7a } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.SessionAffinityConfig != nil { + { + size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) + i-- + if m.PublishNotReadyAddresses { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n -} - -func (m *PersistentVolumeSpec) Size() (n int) { - var l int - _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x68 + i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) + i-- + dAtA[i] = 0x60 + i -= len(m.ExternalTrafficPolicy) + copy(dAtA[i:], m.ExternalTrafficPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) + i-- + dAtA[i] = 0x5a + i -= len(m.ExternalName) + copy(dAtA[i:], m.ExternalName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) + i-- + dAtA[i] = 0x52 + if len(m.LoadBalancerSourceRanges) > 0 { + for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LoadBalancerSourceRanges[iNdEx]) + copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx]))) + i-- + dAtA[i] = 0x4a } } - l = m.PersistentVolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.LoadBalancerIP) + copy(dAtA[i:], m.LoadBalancerIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) + i-- + dAtA[i] = 0x42 + i -= len(m.SessionAffinity) + copy(dAtA[i:], m.SessionAffinity) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) + i-- + dAtA[i] = 0x3a + if len(m.ExternalIPs) > 0 { + for iNdEx := len(m.ExternalIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExternalIPs[iNdEx]) + copy(dAtA[i:], m.ExternalIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalIPs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if m.ClaimRef != nil { - l = m.ClaimRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.PersistentVolumeReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageClassName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x22 + i -= len(m.ClusterIP) + copy(dAtA[i:], m.ClusterIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + 
baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if m.VolumeMode != nil { - l = len(*m.VolumeMode) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { +func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.PdID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - return n + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Pod) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodAffinity) Size() (n int) { +func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ClientIP != nil { + { + size, err := m.ClientIP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *PodAffinityTerm) Size() (n int) { - var l int - _ = l - if m.LabelSelector != nil { - l = 
m.LabelSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.TopologyKey) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *PodAntiAffinity) Size() (n int) { +func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageOSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAttachOptions) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodDNSConfig) Size() (n int) { +func (m *StorageOSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Nameservers) > 0 { - for _, s := range m.Nameservers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.Searches) > 0 { - for _, s := range m.Searches { - l = len(s) - n += 
1 + l + sovGenerated(uint64(l)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.Options) > 0 { - for _, e := range m.Options { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Sysctl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodDNSConfigOption) Size() (n int) { +func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sysctl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Value != nil { - l = len(*m.Value) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodExecOptions) Size() (n int) { +func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TCPSocketAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *Taint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodLogOptions) Size() (n int) { +func (m *Taint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Taint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - if m.SinceSeconds != nil { - n += 
1 + sovGenerated(uint64(*m.SinceSeconds)) - } - if m.SinceTime != nil { - l = m.SinceTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.TailLines != nil { - n += 1 + sovGenerated(uint64(*m.TailLines)) + if m.TimeAdded != nil { + { + size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - if m.LimitBytes != nil { - n += 1 + sovGenerated(uint64(*m.LimitBytes)) + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x1a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodPortForwardOptions) Size() (n int) { +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - n += 1 + sovGenerated(uint64(e)) - } + if m.TolerationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i-- + dAtA[i] = 0x28 } - return n + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x22 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodReadinessGate) Size() (n int) { - var l int - _ = l - l = len(m.ConditionType) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityContext) Size() (n int) { +func (m *TopologySelectorLabelRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if len(m.SupplementalGroups) > 0 { - for _, e := range m.SupplementalGroups { - n += 1 + sovGenerated(uint64(e)) - } - } - if m.FSGroup != nil { - n += 1 + sovGenerated(uint64(*m.FSGroup)) - } - if m.RunAsGroup != nil { - n += 1 + 
sovGenerated(uint64(*m.RunAsGroup)) - } - if len(m.Sysctls) > 0 { - for _, e := range m.Sysctls { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return n + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSignature) Size() (n int) { - var l int - _ = l - if m.PodController != nil { - l = m.PodController.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodSpec) Size() (n int) { +func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologySelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RestartPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.TerminationGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - l = len(m.DNSPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedServiceAccount) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Hostname) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.Subdomain) - n += 2 + l + sovGenerated(uint64(l)) - if m.Affinity != nil { - l = m.Affinity.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - l = len(m.SchedulerName) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.InitContainers) > 0 { - for _, e := range m.InitContainers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.AutomountServiceAccountToken != nil { - n += 3 - } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.HostAliases) > 0 { - for _, e := range m.HostAliases { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - l = len(m.PriorityClassName) - n += 2 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 2 + sovGenerated(uint64(*m.Priority)) - } - if m.DNSConfig != nil { - l = m.DNSConfig.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if 
m.ShareProcessNamespace != nil { - n += 3 - } - if len(m.ReadinessGates) > 0 { - for _, e := range m.ReadinessGates { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if len(m.MatchLabelExpressions) > 0 { + for iNdEx := len(m.MatchLabelExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchLabelExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.RuntimeClassName != nil { - l = len(*m.RuntimeClassName) - n += 2 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *TopologySpreadConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodStatus) Size() (n int) { +func (m *TopologySpreadConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodIP) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ContainerStatuses) > 0 { - for _, e := range m.ContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - l = len(m.QOSClass) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.InitContainerStatuses) > 0 { - for _, e := range m.InitContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.WhenUnsatisfiable) + copy(dAtA[i:], m.WhenUnsatisfiable) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenUnsatisfiable))) + i-- + dAtA[i] = 0x1a + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSkew)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.NominatedNodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *PodStatusResult) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodTemplate) Size() (n int) { +func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *PodTemplateList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PortworxVolumeSource) Size() (n int) { +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + { + size, err := m.VolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Preconditions) Size() (n int) { - var l int - _ = l - if m.UID != nil { - l = len(*m.UID) - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PreferAvoidPodsEntry) Size() (n int) { - var l int - _ = l - l = m.PodSignature.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.EvictionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PreferredSchedulingTerm) Size() (n int) { +func (m *VolumeDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.Preference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Probe) Size() (n int) { - var l int - _ = l - l = m.Handler.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + 
sovGenerated(uint64(m.InitialDelaySeconds)) - n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) - n += 1 + sovGenerated(uint64(m.PeriodSeconds)) - n += 1 + sovGenerated(uint64(m.SuccessThreshold)) - n += 1 + sovGenerated(uint64(m.FailureThreshold)) - return n +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ProjectedVolumeSource) Size() (n int) { +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Sources) > 0 { - for _, e := range m.Sources { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.SubPathExpr) + copy(dAtA[i:], m.SubPathExpr) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) + i-- + dAtA[i] = 0x32 + if m.MountPropagation != nil { + i -= len(*m.MountPropagation) + copy(dAtA[i:], *m.MountPropagation) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) + i-- + dAtA[i] = 0x2a } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + i -= len(m.SubPath) + copy(dAtA[i:], m.SubPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i-- + dAtA[i] = 0x22 + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x1a + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *QuobyteVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Registry) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Volume) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RBDPersistentVolumeSource) Size() (n int) { +func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.Required != nil { + { + size, err := m.Required.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - return n + return dAtA[:n], nil } -func (m *RBDVolumeSource) Size() (n int) { +func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.ServiceAccountToken != nil { + { + size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - n += 2 - return n + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RangeAllocation) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Range) - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ReplicationController) Size() (n int) { +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if m.Projected != nil { + { + size, err := m.Projected.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.Cinder != nil { + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if m.FlexVolume != nil { + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x5a + } + if m.PersistentVolumeClaim != nil { + { + size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitRepo != nil { + { + size, err := m.GitRepo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EmptyDir != nil { + { + size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReplicationControllerCondition) Size() (n int) { +func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.StoragePolicyID) + copy(dAtA[i:], m.StoragePolicyID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) + i-- + dAtA[i] = 0x22 + i -= len(m.StoragePolicyName) + copy(dAtA[i:], m.StoragePolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) + i-- + dAtA[i] = 0x1a + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumePath) + copy(dAtA[i:], m.VolumePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerList) Size() (n int) { +func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WeightedPodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.PodAffinityTerm.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ReplicationControllerSpec) Size() (n int) { +func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) + if m.RunAsUserName != nil { + i -= len(*m.RunAsUserName) + copy(dAtA[i:], *m.RunAsUserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RunAsUserName))) + i-- + dAtA[i] = 0x1a } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.GMSACredentialSpec != nil { + i -= len(*m.GMSACredentialSpec) + copy(dAtA[i:], *m.GMSACredentialSpec) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec))) + i-- + dAtA[i] = 0x12 } - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.GMSACredentialSpecName != nil { + i -= len(*m.GMSACredentialSpecName) + copy(dAtA[i:], *m.GMSACredentialSpecName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName))) + i-- + dAtA[i] = 0xa } - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + return len(dAtA) - i, nil } -func (m *ReplicationControllerStatus) Size() (n int) { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + 
sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 return n } -func (m *ResourceFieldSelector) Size() (n int) { +func (m *Affinity) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.ContainerName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Divisor.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *ResourceQuota) Size() (n int) { +func (m *AttachedVolume) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() + l = len(m.DevicePath) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ResourceQuotaList) Size() (n int) { +func (m *AvoidPods) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -13678,150 +19115,297 @@ func (m *ResourceQuotaList) Size() (n int) { return n } -func (m *ResourceQuotaSpec) Size() (n int) { +func (m *AzureDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) } - if m.ScopeSelector != nil { - l = m.ScopeSelector.Size() + if m.ReadOnly != nil { + n += 2 + } + if m.Kind != nil { + l = len(*m.Kind) n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *ResourceQuotaStatus) Size() (n int) { - var l int +func (m *AzureFilePersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretNamespace != nil { + l = len(*m.SecretNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AzureFileVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) 
+ n += 2 + return n +} + +func (m *Binding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { _ = k _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if len(m.Used) > 0 { - for k, v := range m.Used { + if m.ControllerPublishSecretRef != nil { + l = m.ControllerPublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeStageSecretRef != nil { + l = m.NodeStageSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ControllerExpandSecretRef != nil { + l = m.ControllerExpandSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CSIVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnly != nil { + n += 2 + } + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { _ = k _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *ResourceRequirements) Size() (n int) { +func (m *Capabilities) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Limits) > 0 { - for k, v := range m.Limits { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.Requests) > 0 { - for k, v := range m.Requests { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } return n } -func (m *SELinuxOptions) Size() (n int) { +func (m *CephFSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Role) + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) + l = len(m.User) n += 1 + l + sovGenerated(uint64(l)) 
- l = len(m.Level) + l = len(m.SecretFile) n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 return n } -func (m *ScaleIOPersistentVolumeSource) Size() (n int) { +func (m *CephFSVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.Gateway) + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) n += 1 + l + sovGenerated(uint64(l)) if m.SecretRef != nil { l = m.SecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) + return n +} + +func (m *CinderPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeID) n += 1 + l + sovGenerated(uint64(l)) l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *ScaleIOVolumeSource) Size() (n int) { +func (m *CinderVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.Gateway) + l = len(m.VolumeID) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) + l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) + n += 2 if m.SecretRef != nil { l = m.SecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) + return n +} + +func (m *ClientIPConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + return n +} + +func (m *ComponentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) + l = len(m.Status) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) + l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) + l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) - n += 2 return n } -func (m *ScopeSelector) Size() (n int) { +func (m *ComponentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -13829,23 +19413,27 @@ func (m *ScopeSelector) Size() (n int) { return n } -func (m *ScopedResourceSelectorRequirement) Size() (n int) { +func (m *ComponentStatusList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.ScopeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } return n } -func (m *Secret) Size() (n int) { +func (m *ConfigMap) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
m.ObjectMeta.Size() @@ -13854,28 +19442,29 @@ func (m *Secret) Size() (n int) { for k, v := range m.Data { _ = k _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.StringData) > 0 { - for k, v := range m.StringData { + if len(m.BinaryData) > 0 { + for k, v := range m.BinaryData { _ = k _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } return n } -func (m *SecretEnvSource) Size() (n int) { +func (m *ConfigMapEnvSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LocalObjectReference.Size() @@ -13886,7 +19475,10 @@ func (m *SecretEnvSource) Size() (n int) { return n } -func (m *SecretKeySelector) Size() (n int) { +func (m *ConfigMapKeySelector) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LocalObjectReference.Size() @@ -13899,7 +19491,10 @@ func (m *SecretKeySelector) Size() (n int) { return n } -func (m *SecretList) Size() (n int) { +func (m *ConfigMapList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -13913,7 +19508,29 @@ func (m *SecretList) Size() (n int) { return n } -func (m *SecretProjection) Size() (n int) { +func (m *ConfigMapNodeConfigSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletConfigKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigMapProjection) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LocalObjectReference.Size() @@ -13930,20 +19547,13 @@ func (m *SecretProjection) Size() (n int) { return n } -func (m *SecretReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SecretVolumeSource) Size() (n int) { +func (m *ConfigMapVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.SecretName) + l = m.LocalObjectReference.Size() n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { for _, e := range m.Items { @@ -13960,322 +19570,354 @@ func (m *SecretVolumeSource) Size() (n int) { return n } -func (m *SecurityContext) Size() (n int) { +func (m *Container) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.Capabilities != nil { - l = m.Capabilities.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.Privileged != nil { - n += 2 + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if 
m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.RunAsNonRoot != nil { - n += 2 + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.ReadOnlyRootFilesystem != nil { - n += 2 + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.AllowPrivilegeEscalation != nil { - n += 2 + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.ProcMount != nil { - l = len(*m.ProcMount) + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() n += 1 + l + sovGenerated(uint64(l)) } + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.StartupProbe != nil { + l = m.StartupProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } -func (m *SerializedReference) Size() (n int) { +func (m *ContainerImage) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.Reference.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.SizeBytes)) return n } -func (m *Service) Size() (n int) { +func (m *ContainerPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.ObjectMeta.Size() + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() + n += 1 + sovGenerated(uint64(m.HostPort)) + n += 1 + sovGenerated(uint64(m.ContainerPort)) + l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() + l = len(m.HostIP) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ServiceAccount) Size() (n int) { +func (m *ContainerState) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Secrets) > 0 { - for _, e := range m.Secrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Waiting != nil { + l = m.Waiting.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Running != nil { + l = m.Running.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.AutomountServiceAccountToken != nil { - n += 2 + if m.Terminated != nil { + l = m.Terminated.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m 
*ServiceAccountList) Size() (n int) { +func (m *ContainerStateRunning) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.ListMeta.Size() + l = m.StartedAt.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *ServiceAccountTokenProjection) Size() (n int) { +func (m *ContainerStateTerminated) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.Audience) + n += 1 + sovGenerated(uint64(m.ExitCode)) + n += 1 + sovGenerated(uint64(m.Signal)) + l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) - if m.ExpirationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) - } - l = len(m.Path) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ServiceList) Size() (n int) { +func (m *ContainerStateWaiting) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.ListMeta.Size() + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *ServicePort) Size() (n int) { +func (m *ContainerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Protocol) + l = m.State.Size() n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = m.TargetPort.Size() + l = m.LastTerminationState.Size() n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.NodePort)) + n += 2 + n += 1 + sovGenerated(uint64(m.RestartCount)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + if m.Started != nil { + n += 2 + } return n } -func (m *ServiceProxyOptions) Size() (n int) { +func (m *DaemonEndpoint) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) return n } -func (m *ServiceSpec) Size() (n int) { +func (m *DownwardAPIProjection) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { + if len(m.Items) > 0 { + for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ClusterIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.SessionAffinity) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.LoadBalancerIP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ExternalName) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.ExternalTrafficPolicy) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) - n += 2 - if m.SessionAffinityConfig != nil { - l = m.SessionAffinityConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *ServiceStatus) Size() (n int) { +func (m *DownwardAPIVolumeFile) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = m.LoadBalancer.Size() + l = len(m.Path) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } return n } -func (m *SessionAffinityConfig) Size() (n int) { +func (m *DownwardAPIVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.ClientIP != nil { - l = m.ClientIP.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } return n } -func (m *StorageOSPersistentVolumeSource) Size() (n int) { +func (m *EmptyDirVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) + l = len(m.Medium) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() + if m.SizeLimit != nil { + l = m.SizeLimit.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *StorageOSVolumeSource) Size() (n int) { +func (m *EndpointAddress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) + l = len(m.IP) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() + if m.NodeName != nil { + l = len(*m.NodeName) n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Sysctl) Size() (n int) { +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TCPSocketAction) Size() (n int) { - var l int - _ = l - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *Taint) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TimeAdded != nil { - l = m.TimeAdded.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *EndpointSubset) Size() (n int) { + if m == nil { + return 0 } - return n -} - -func (m *Toleration) Size() (n int) { var l int _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + 
sovGenerated(uint64(l)) - if m.TolerationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return n -} - -func (m *TopologySelectorLabelRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) + if len(m.NotReadyAddresses) > 0 { + for _, e := range m.NotReadyAddresses { + l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - return n -} - -func (m *TopologySelectorTerm) Size() (n int) { - var l int - _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, e := range m.MatchLabelExpressions { + if len(m.Ports) > 0 { + for _, e := range m.Ports { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -14283,3019 +19925,8975 @@ func (m *TopologySelectorTerm) Size() (n int) { return n } -func (m *TypedLocalObjectReference) Size() (n int) { +func (m *Endpoints) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.APIGroup != nil { - l = len(*m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *Volume) Size() (n int) { +func (m *EndpointsList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.VolumeSource.Size() + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *VolumeDevice) Size() (n int) { +func (m *EnvFromSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) + l = len(m.Prefix) n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *VolumeMount) Size() (n int) { +func (m *EnvVar) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.MountPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubPath) + l = len(m.Value) n += 1 + l + sovGenerated(uint64(l)) - if m.MountPropagation != nil { - l = len(*m.MountPropagation) + if m.ValueFrom != nil { + l = m.ValueFrom.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *VolumeNodeAffinity) Size() (n int) { - var l int - _ = l - if m.Required != nil { - l = m.Required.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *EnvVarSource) Size() (n int) { + if m == nil { + return 0 } - return n -} - -func (m *VolumeProjection) Size() (n int) { var l int _ = l - if m.Secret != nil { - l = m.Secret.Size() + if m.FieldRef != nil { + l = m.FieldRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() + if m.ConfigMapKeyRef != nil { + l = m.ConfigMapKeyRef.Size() n += 1 + l + 
sovGenerated(uint64(l)) } - if m.ServiceAccountToken != nil { - l = m.ServiceAccountToken.Size() + if m.SecretKeyRef != nil { + l = m.SecretKeyRef.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *VolumeSource) Size() (n int) { +func (m *EphemeralContainer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EmptyDir != nil { - l = m.EmptyDir.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GitRepo != nil { - l = m.GitRepo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = m.EphemeralContainerCommon.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TargetContainerName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EphemeralContainerCommon) Size() (n int) { + if m == nil { + return 0 } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.PersistentVolumeClaim != nil { - l = m.PersistentVolumeClaim.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Cinder != nil { - l = m.Cinder.Size() + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.CephFS != nil { - l = m.CephFS.Size() + if m.Lifecycle != nil { + l = m.Lifecycle.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Flocker != nil { - l = m.Flocker.Size() + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.FC != nil { - l = m.FC.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 2 + l + sovGenerated(uint64(l)) + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 
+ l + sovGenerated(uint64(l)) + } } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 2 + l + sovGenerated(uint64(l)) + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() + if m.StartupProbe != nil { + l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EphemeralContainers) Size() (n int) { + if m == nil { + return 0 } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Event) Size() (n int) { + if m == nil { + return 0 } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.InvolvedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Projected != nil { - l = m.Projected.Size() - n += 2 + l + sovGenerated(uint64(l)) + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { +func (m *EventSeries) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = len(m.VolumePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyName) + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyID) + l = len(m.State) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *WeightedPodAffinityTerm) Size() (n int) { +func (m *EventSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = 
m.PodAffinityTerm.Size() + l = len(m.Component) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) n += 1 + l + sovGenerated(uint64(l)) return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *ExecAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } return n } -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AWSElasticBlockStoreVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *FCVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Affinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Affinity{`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "NodeAffinity", "NodeAffinity", 1) + `,`, - `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, - `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AttachedVolume) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&AttachedVolume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s -} -func (this *AvoidPods) String() string { - if this == nil { - return "nil" + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) } - s := strings.Join([]string{`&AvoidPods{`, - `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AzureDiskVolumeSource) String() string { - if this == nil { - return "nil" + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.WWIDs) > 0 { + for _, s := range m.WWIDs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&AzureDiskVolumeSource{`, - `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, - `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, - `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `Kind:` + valueToStringGenerated(this.Kind) + `,`, - `}`, - }, "") - return s + return n } -func (this *AzureFilePersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *FlexPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretNamespace:` + 
valueToStringGenerated(this.SecretNamespace) + `,`, - `}`, - }, "") - return s -} -func (this *AzureFileVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&AzureFileVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Binding) String() string { - if this == nil { - return "nil" + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&Binding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *CSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *FlexVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Capabilities) String() string { - if this == nil { - return "nil" + +func (m *FlockerVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Capabilities{`, - `Add:` + fmt.Sprintf("%v", this.Add) + `,`, - `Drop:` 
+ fmt.Sprintf("%v", this.Drop) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.DatasetName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DatasetUUID) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *CephFSPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *GCEPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.PDName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n } -func (this *CephFSVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *GitRepoVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&CephFSVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *CinderPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *GlusterfsPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&CinderPersistentVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CinderVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.EndpointsNamespace != nil { + l = len(*m.EndpointsNamespace) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&CinderVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ClientIPConfig) String() string { - if this == nil { - return "nil" + +func (m *GlusterfsVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ClientIPConfig{`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `}`, - }, "") - return s + var l 
int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *ComponentCondition) String() string { - if this == nil { - return "nil" + +func (m *HTTPGetAction) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ComponentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatus) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scheme) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.HTTPHeaders) > 0 { + for _, e := range m.HTTPHeaders { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ComponentStatus{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ComponentStatusList) String() string { - if this == nil { - return "nil" + +func (m *HTTPHeader) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ComponentStatusList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ConfigMap) String() string { - if this == nil { - return "nil" - } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) + +func (m *Handler) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string]string{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForData += "}" - keysForBinaryData := make([]string, 0, len(this.BinaryData)) - for k := range this.BinaryData { - keysForBinaryData = append(keysForBinaryData, k) + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - mapStringForBinaryData := "map[string][]byte{" - for _, k := range keysForBinaryData { - mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForBinaryData += "}" - s := strings.Join([]string{`&ConfigMap{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + 
mapStringForData + `,`, - `BinaryData:` + mapStringForBinaryData + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapEnvSource) String() string { - if this == nil { - return "nil" + +func (m *HostAlias) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapKeySelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ConfigMapKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapList) String() string { - if this == nil { - return "nil" + +func (m *HostPathVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapNodeConfigSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapProjection) String() string { - if this == nil { - return "nil" + +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := 
strings.Join([]string{`&ConfigMapVolumeSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *Container) String() string { - if this == nil { - return "nil" + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Container{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, - `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, - `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, - `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "Lifecycle", 1) + `,`, - `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, - `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, - `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, - `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerImage) String() string { - if this == nil { - return "nil" + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ContainerImage{`, - `Names:` + fmt.Sprintf("%v", this.Names) + `,`, - `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, - `}`, - }, "") - return s + return n } -func (this *ContainerPort) String() string { - if this == nil { - return "nil" + +func (m *ISCSIVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, - `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `}`, - }, "") - 
return s -} -func (this *ContainerState) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ContainerState{`, - `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, - `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, - `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateRunning) String() string { - if this == nil { - return "nil" + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ContainerStateRunning{`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateTerminated) String() string { - if this == nil { - return "nil" + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ContainerStateTerminated{`, - `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s + return n } -func (this *ContainerStateWaiting) String() string { - if this == nil { - return "nil" + +func (m *KeyToPath) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerStateWaiting{`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStatus) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) } - s := strings.Join([]string{`&ContainerStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, - `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, - `ContainerID:` + fmt.Sprintf("%v", 
this.ContainerID) + `,`, - `}`, - }, "") - return s + return n } -func (this *DaemonEndpoint) String() string { - if this == nil { - return "nil" + +func (m *Lifecycle) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DaemonEndpoint{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIProjection) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.PostStart != nil { + l = m.PostStart.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&DownwardAPIProjection{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIVolumeFile) String() string { - if this == nil { - return "nil" + if m.PreStop != nil { + l = m.PreStop.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&DownwardAPIVolumeFile{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s + return n } -func (this *DownwardAPIVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *LimitRange) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DownwardAPIVolumeSource{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *EmptyDirVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *LimitRangeItem) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EmptyDirVolumeSource{`, - `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, - `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointAddress) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Max) > 0 { + for k, v := range m.Max { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&EndpointAddress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointPort) String() string { - if this == nil { - return "nil" + if len(m.Min) > 0 { + for k, v := range m.Min { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := 
strings.Join([]string{`&EndpointPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointSubset) String() string { - if this == nil { - return "nil" + if len(m.Default) > 0 { + for k, v := range m.Default { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&EndpointSubset{`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Endpoints) String() string { - if this == nil { - return "nil" + if len(m.DefaultRequest) > 0 { + for k, v := range m.DefaultRequest { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&Endpoints{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointsList) String() string { - if this == nil { - return "nil" + if len(m.MaxLimitRequestRatio) > 0 { + for k, v := range m.MaxLimitRequestRatio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&EndpointsList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *EnvFromSource) String() string { - if this == nil { - return "nil" + +func (m *LimitRangeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EnvFromSource{`, - `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, - `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *EnvVar) String() string { - if this == nil { - return "nil" + +func (m *LimitRangeSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EnvVar{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `ValueFrom:` + 
strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *EnvVarSource) String() string { - if this == nil { - return "nil" + +func (m *List) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EnvVarSource{`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, - `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *Event) String() string { - if this == nil { - return "nil" + +func (m *LoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, - `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, - `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, - `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *EventList) String() string { - if this == nil { - return "nil" + +func (m *LoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"Event", "Event", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *EventSeries) String() string { - if this == nil { - return "nil" + +func (m *LocalObjectReference) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EventSeries{`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *EventSource) String() string { - if this == nil { - return "nil" + +func (m *LocalVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EventSource{`, - `Component:` + fmt.Sprintf("%v", this.Component) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ExecAction) String() string { - if this == nil { - return "nil" + +func (m *NFSVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ExecAction{`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *FCVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *Namespace) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&FCVolumeSource{`, - `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, - `Lun:` + valueToStringGenerated(this.Lun) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *FlexPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NamespaceCondition) Size() (n int) { + if m == nil { + return 0 } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Options:` + mapStringForOptions + `,`, - `}`, - }, "") - return s + return n } -func (this *FlexVolumeSource) String() string { - if this == nil { - return "nil" - } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) + +func (m *NamespaceSpec) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Options:` + mapStringForOptions + `,`, - `}`, - }, "") - return s + return n } -func (this *FlockerVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NamespaceStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&FlockerVolumeSource{`, - `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, - `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, - `}`, - }, "") - return s -} -func (this *GCEPersistentDiskVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, - `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + return n } -func (this *GitRepoVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *Node) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&GitRepoVolumeSource{`, - `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *GlusterfsVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeAddress) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&GlusterfsVolumeSource{`, - `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + 
fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HTTPGetAction) String() string { - if this == nil { - return "nil" + +func (m *NodeAffinity) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HTTPGetAction{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, - `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPHeader) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&HTTPHeader{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *Handler) String() string { - if this == nil { - return "nil" + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Handler{`, - `Exec:` + strings.Replace(fmt.Sprintf("%v", this.Exec), "ExecAction", "ExecAction", 1) + `,`, - `HTTPGet:` + strings.Replace(fmt.Sprintf("%v", this.HTTPGet), "HTTPGetAction", "HTTPGetAction", 1) + `,`, - `TCPSocket:` + strings.Replace(fmt.Sprintf("%v", this.TCPSocket), "TCPSocketAction", "TCPSocketAction", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *HostAlias) String() string { - if this == nil { - return "nil" + +func (m *NodeCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HostAlias{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HostPathVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeConfigSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HostPathVolumeSource{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Type:` + valueToStringGenerated(this.Type) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ISCSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeConfigStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, - `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, - `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, - 
`Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, - `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, - `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, - `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.Assigned != nil { + l = m.Assigned.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Active != nil { + l = m.Active.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastKnownGood != nil { + l = m.LastKnownGood.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ISCSIVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeDaemonEndpoints) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ISCSIVolumeSource{`, - `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, - `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, - `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, - `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, - `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, - `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *KeyToPath) String() string { - if this == nil { - return "nil" + +func (m *NodeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&KeyToPath{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *Lifecycle) String() string { - if this == nil { - return "nil" + +func (m *NodeProxyOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Lifecycle{`, - `PostStart:` + strings.Replace(fmt.Sprintf("%v", this.PostStart), "Handler", "Handler", 1) + `,`, - `PreStop:` + strings.Replace(fmt.Sprintf("%v", this.PreStop), "Handler", "Handler", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *LimitRange) String() string { - if this == nil { - return "nil" + +func (m *NodeResources) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LimitRange{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + 
strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } -func (this *LimitRangeItem) String() string { - if this == nil { - return "nil" + +func (m *NodeSelector) Size() (n int) { + if m == nil { + return 0 } - keysForMax := make([]string, 0, len(this.Max)) - for k := range this.Max { - keysForMax = append(keysForMax, string(k)) + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - mapStringForMax := "ResourceList{" - for _, k := range keysForMax { - mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + return n +} + +func (m *NodeSelectorRequirement) Size() (n int) { + if m == nil { + return 0 } - mapStringForMax += "}" - keysForMin := make([]string, 0, len(this.Min)) - for k := range this.Min { - keysForMin = append(keysForMin, string(k)) + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - mapStringForMin := "ResourceList{" - for _, k := range keysForMin { - mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + return n +} + +func (m *NodeSelectorTerm) Size() (n int) { + if m == nil { + return 0 } - mapStringForMin += "}" - keysForDefault := make([]string, 0, len(this.Default)) - for k := range this.Default { - keysForDefault = append(keysForDefault, string(k)) + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - mapStringForDefault := "ResourceList{" - for _, k := range keysForDefault { - mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + if len(m.MatchFields) > 0 { + for _, e := range m.MatchFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForDefault += "}" - keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) - for k := range this.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + return n +} + +func (m *NodeSpec) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - mapStringForDefaultRequest := "ResourceList{" - for _, k := range keysForDefaultRequest { - mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DoNotUseExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForDefaultRequest += "}" - keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) - for k := range 
this.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + if m.ConfigSource != nil { + l = m.ConfigSource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - mapStringForMaxLimitRequestRatio := "ResourceList{" - for _, k := range keysForMaxLimitRequestRatio { - mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + if len(m.PodCIDRs) > 0 { + for _, s := range m.PodCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForMaxLimitRequestRatio += "}" - s := strings.Join([]string{`&LimitRangeItem{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Max:` + mapStringForMax + `,`, - `Min:` + mapStringForMin + `,`, - `Default:` + mapStringForDefault + `,`, - `DefaultRequest:` + mapStringForDefaultRequest + `,`, - `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, - `}`, - }, "") - return s + return n } -func (this *LimitRangeList) String() string { - if this == nil { - return "nil" + +func (m *NodeStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LimitRangeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LimitRangeSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&LimitRangeSpec{`, - `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *List) String() string { - if this == nil { - return "nil" + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&List{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LoadBalancerIngress) String() string { - if this == nil { - return "nil" + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LoadBalancerIngress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `}`, - }, "") - return s -} -func (this *LoadBalancerStatus) String() string { - if this == nil { - return "nil" + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LoadBalancerStatus{`, - `Ingress:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LocalObjectReference) String() string { - if this == nil { - return "nil" + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LocalObjectReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *LocalVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LocalVolumeSource{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `}`, - }, "") - return s + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *NFSVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeSystemInfo) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NFSVolumeSource{`, - `Server:` + fmt.Sprintf("%v", this.Server) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Namespace) String() string { - if this == nil { - return "nil" + +func (m *ObjectFieldSelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Namespace{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NamespaceList) String() string { - if this == nil { - return "nil" + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NamespaceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NamespaceSpec) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolume) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NamespaceSpec{`, - `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NamespaceStatus) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaim) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NamespaceStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Node) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Node{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NodeAddress) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeAddress{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Address:` + fmt.Sprintf("%v", this.Address) + `,`, - `}`, - }, "") - return s -} -func (this *NodeAffinity) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "NodeSelector", "NodeSelector", 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeCondition) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *NodeConfigSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeConfigSource{`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeConfigStatus) String() string { - if this == nil { - return "nil" + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeConfigStatus{`, - `Assigned:` + strings.Replace(fmt.Sprintf("%v", this.Assigned), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `Active:` + strings.Replace(fmt.Sprintf("%v", this.Active), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `LastKnownGood:` + strings.Replace(fmt.Sprintf("%v", this.LastKnownGood), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *NodeDaemonEndpoints) String() string { - if this == nil { - return "nil" + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeDaemonEndpoints{`, - `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeList) String() string { - if this == nil { - return "nil" + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeProxyOptions) String() string { - if this == nil { - return "nil" + if m.DataSource != nil { + l = m.DataSource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeResources) String() string { - if this == nil { - 
return "nil" + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + if m == nil { + return 0 } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForCapacity += "}" - s := strings.Join([]string{`&NodeResources{`, - `Capacity:` + mapStringForCapacity + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelector) String() string { - if this == nil { - return "nil" + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeSelector{`, - `NodeSelectorTerms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NodeSelectorTerms), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeSelectorRequirement) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeSelectorRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *NodeSelectorTerm) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeSelectorTerm{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, - `MatchFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchFields), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeSpec{`, - `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, - `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, - `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, - `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, - `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, - `ConfigSource:` + strings.Replace(fmt.Sprintf("%v", this.ConfigSource), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeStatus) String() string { - if this == 
nil { - return "nil" + +func (m *PersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForCapacity += "}" - keysForAllocatable := make([]string, 0, len(this.Allocatable)) - for k := range this.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - mapStringForAllocatable := "ResourceList{" - for _, k := range keysForAllocatable { - mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForAllocatable += "}" - s := strings.Join([]string{`&NodeStatus{`, - `Capacity:` + mapStringForCapacity + `,`, - `Allocatable:` + mapStringForAllocatable + `,`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + `,`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + `,`, - `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, - `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, - `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, - `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, - `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`, - `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSystemInfo) String() string { - if this == nil { - return "nil" + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeSystemInfo{`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, - `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, - `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, - `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, - `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, - `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, - `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, - `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, - `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectFieldSelector) String() 
string { - if this == nil { - return "nil" + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ObjectFieldSelector{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectReference) String() string { - if this == nil { - return "nil" + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolume) String() string { - if this == nil { - return "nil" + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolume{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaim) String() string { - if this == nil { - return "nil" + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeClaim{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimCondition) String() string { - if this == nil { - return "nil" + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimList) String() string { - if this == nil { - return "nil" + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeClaimList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), 
`&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimSpec) String() string { - if this == nil { - return "nil" + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, - `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, - `DataSource:` + strings.Replace(fmt.Sprintf("%v", this.DataSource), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimStatus) String() string { - if this == nil { - return "nil" + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForCapacity += "}" - s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Capacity:` + mapStringForCapacity + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimVolumeSource) String() string { - if this == nil { - return "nil" + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, - `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeList) String() string { - if this == nil { - return "nil" + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeSource) String() string { - if this == nil { - return "nil" + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) } - s := 
strings.Join([]string{`&PersistentVolumeSource{`, - `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, - `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeSpec) String() string { - if this == nil { - return "nil" + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for 
_, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) } - mapStringForCapacity += "}" - s := strings.Join([]string{`&PersistentVolumeSpec{`, - `Capacity:` + mapStringForCapacity + `,`, - `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, - `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, - `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeStatus) String() string { - if this == nil { - return "nil" + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `}`, - }, "") - return s + return n } -func (this *PhotonPersistentDiskVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, - `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `}`, - }, "") - return s -} -func (this *Pod) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&Pod{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodAffinity) String() string { - if this == nil { - return "nil" + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} 
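
Note on the generated Size() helpers added throughout this hunk: every length-delimited field (string, bytes, embedded message) is counted as one tag byte, plus the payload, plus the varint that encodes the payload length — the recurring "n += 1 + l + sovGenerated(uint64(l))" lines. Bool fields are counted as a flat "n += 2" (tag byte plus one value byte), and fields numbered 16 or higher need a two-byte tag, which is why the later PersistentVolumeSource members (AzureDisk, ScaleIO, CSI, ...) use "n += 2 + l + ...". A minimal sketch of the helper these methods rely on, assuming the conventional gogo/protobuf form — sovGenerated is defined elsewhere in the same generated file (not shown in this hunk) and its exact emitted body varies between generator versions:

    // sovGenerated reports how many bytes the protobuf varint encoding of x occupies.
    func sovGenerated(x uint64) (n int) {
    	for {
    		n++
    		x >>= 7
    		if x == 0 {
    			return n
    		}
    	}
    }

    // Map fields are sized as one nested "entry" message per key/value pair,
    // mirroring the generated loops above: key and value are each a
    // length-delimited field inside the entry, and the entry itself is a
    // length-delimited field of the parent message.
    //   mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
    //   n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))

The interleaving of removed String() bodies with added Size() bodies in this hunk appears to be ordering churn from regenerating the vendored file with a newer generator, not a hand-written change; the Size() results are what the corresponding marshal routines use to pre-size their output buffers.
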
-func (this *PodAffinityTerm) String() string { - if this == nil { - return "nil" + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodAffinityTerm{`, - `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, - `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, - `}`, - }, "") - return s -} -func (this *PodAntiAffinity) String() string { - if this == nil { - return "nil" + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodAntiAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodAttachOptions) String() string { - if this == nil { - return "nil" + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodAttachOptions{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `}`, - }, "") - return s -} -func (this *PodCondition) String() string { - if this == nil { - return "nil" + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodDNSConfig) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodDNSConfig{`, - `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, - `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, - `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodDNSConfigOption) String() string { - if this == nil { - return "nil" + +func (m 
*PhotonPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodDNSConfigOption{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + valueToStringGenerated(this.Value) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodExecOptions) String() string { - if this == nil { - return "nil" + +func (m *Pod) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodExecOptions{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodList) String() string { - if this == nil { - return "nil" + +func (m *PodAffinity) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodLogOptions) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodLogOptions{`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, - `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, - `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, - `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, - `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, - `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, - `}`, - }, "") - return s -} -func (this *PodPortForwardOptions) String() string { - if this == nil { - return "nil" + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodPortForwardOptions{`, - `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodProxyOptions) String() string { - if this == nil { - return "nil" + +func (m *PodAffinityTerm) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = 
len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodReadinessGate) String() string { - if this == nil { - return "nil" + +func (m *PodAntiAffinity) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodReadinessGate{`, - `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *PodSecurityContext) String() string { - if this == nil { - return "nil" + +func (m *PodAttachOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodSecurityContext{`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, - `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `Sysctls:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sysctls), "Sysctl", "Sysctl", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodSignature) String() string { - if this == nil { - return "nil" + +func (m *PodCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodSignature{`, - `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodSpec) String() string { - if this == nil { - return "nil" + +func (m *PodDNSConfig) Size() (n int) { + if m == nil { + return 0 } - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&PodSpec{`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, - 
`Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, - `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, - `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "PodSecurityContext", 1) + `,`, - `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, - `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, - `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, - `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, - `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *PodStatus) String() string { - if this == nil { - return "nil" + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, - `QOSClass:` + fmt.Sprintf("%v", 
this.QOSClass) + `,`, - `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, - `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodStatusResult) String() string { - if this == nil { - return "nil" + +func (m *PodDNSConfigOption) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodStatusResult{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplate) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodTemplateList) String() string { - if this == nil { - return "nil" + +func (m *PodExecOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplateSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PortworxVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PodIP) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PortworxVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Preconditions) String() string { - if this == nil { - return "nil" + +func (m *PodList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Preconditions{`, - `UID:` + valueToStringGenerated(this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *PreferAvoidPodsEntry) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PreferAvoidPodsEntry{`, - `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, - `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s + return n } -func (this *PreferredSchedulingTerm) String() string { - if this == nil { - return "nil" + +func (m *PodLogOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PreferredSchedulingTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Probe) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) } - s := strings.Join([]string{`&Probe{`, - `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, - `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, - `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, - `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, - `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, - `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, - `}`, - }, "") - return s -} -func (this *ProjectedVolumeSource) String() string { - if this == nil { - return "nil" + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ProjectedVolumeSource{`, - `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s -} -func (this *QuobyteVolumeSource) String() string { - if this == nil { - return "nil" + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) } - s := strings.Join([]string{`&QuobyteVolumeSource{`, - `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, - `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `}`, - }, "") - return s -} -func (this *RBDPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) } - s := strings.Join([]string{`&RBDPersistentVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + 
fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + n += 2 + return n } -func (this *RBDVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PodPortForwardOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&RBDVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *RangeAllocation) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } } - s := strings.Join([]string{`&RangeAllocation{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Range:` + fmt.Sprintf("%v", this.Range) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s + return n } -func (this *ReplicationController) String() string { - if this == nil { - return "nil" + +func (m *PodProxyOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ReplicationController{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ReplicationControllerCondition) String() string { - if this == nil { - return "nil" + +func (m *PodReadinessGate) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ReplicationControllerCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.ConditionType) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ReplicationControllerList) String() string { - if this == nil { - return "nil" + +func (m *PodSecurityContext) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ReplicationControllerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerSpec) 
String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + if m.RunAsNonRoot != nil { + n += 2 } - mapStringForSelector += "}" - s := strings.Join([]string{`&ReplicationControllerSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerStatus) String() string { - if this == nil { - return "nil" + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } } - s := strings.Join([]string{`&ReplicationControllerStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFieldSelector) String() string { - if this == nil { - return "nil" + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) } - s := strings.Join([]string{`&ResourceFieldSelector{`, - `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuota) String() string { - if this == nil { - return "nil" + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) } - s := strings.Join([]string{`&ResourceQuota{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + if len(m.Sysctls) > 0 { + for _, e := range m.Sysctls { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ResourceQuotaList) String() string { - if this == nil { - return "nil" + +func (m *PodSignature) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ResourceQuotaList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ResourceQuotaSpec) String() string { - if this == nil { - return "nil" + +func (m *PodSpec) Size() (n int) { + if m == nil { + return 0 } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForHard += "}" - s := strings.Join([]string{`&ResourceQuotaSpec{`, - `Hard:` + mapStringForHard + `,`, - `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, - `ScopeSelector:` + strings.Replace(fmt.Sprintf("%v", this.ScopeSelector), "ScopeSelector", "ScopeSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuotaStatus) String() string { - if this == nil { - return "nil" + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForHard += "}" - keysForUsed := make([]string, 0, len(this.Used)) - for k := range this.Used { - keysForUsed = append(keysForUsed, string(k)) + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - mapStringForUsed := "ResourceList{" - for _, k := range keysForUsed { - mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForUsed += "}" - s := strings.Join([]string{`&ResourceQuotaStatus{`, - `Hard:` + mapStringForHard + `,`, - `Used:` + mapStringForUsed + `,`, - `}`, - }, "") - return s -} -func (this 
*ResourceRequirements) String() string { - if this == nil { - return "nil" + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) } - keysForLimits := make([]string, 0, len(this.Limits)) - for k := range this.Limits { - keysForLimits = append(keysForLimits, string(k)) + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - mapStringForLimits := "ResourceList{" - for _, k := range keysForLimits { - mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + if m.AutomountServiceAccountToken != nil { + n += 3 } - mapStringForLimits += "}" - keysForRequests := make([]string, 0, len(this.Requests)) - for k := range this.Requests { - keysForRequests = append(keysForRequests, string(k)) + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - mapStringForRequests := "ResourceList{" - for _, k := range keysForRequests { - mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - mapStringForRequests += "}" - s := strings.Join([]string{`&ResourceRequirements{`, - `Limits:` + mapStringForLimits + `,`, - `Requests:` + mapStringForRequests + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxOptions) String() string { - if this == nil { - return "nil" + l = len(m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) } - s := strings.Join([]string{`&SELinuxOptions{`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Role:` + fmt.Sprintf("%v", this.Role) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Level:` + fmt.Sprintf("%v", this.Level) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleIOPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleIOVolumeSource) String() string { - if this == nil { - return "nil" + if m.ShareProcessNamespace != nil { + n += 3 } - s := strings.Join([]string{`&ScaleIOVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), 
"LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScopeSelector) String() string { - if this == nil { - return "nil" + if len(m.ReadinessGates) > 0 { + for _, e := range m.ReadinessGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ScopeSelector{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScopedResourceSelectorRequirement) String() string { - if this == nil { - return "nil" + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) + n += 2 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, - `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *Secret) String() string { - if this == nil { - return "nil" + if m.EnableServiceLinks != nil { + n += 3 } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 2 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string][]byte{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + if len(m.Overhead) > 0 { + for k, v := range m.Overhead { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForData += "}" - keysForStringData := make([]string, 0, len(this.StringData)) - for k := range this.StringData { - keysForStringData = append(keysForStringData, k) + if len(m.TopologySpreadConstraints) > 0 { + for _, e := range m.TopologySpreadConstraints { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - mapStringForStringData := "map[string]string{" - for _, k := range keysForStringData { - mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - mapStringForStringData += "}" - s := strings.Join([]string{`&Secret{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `StringData:` + mapStringForStringData + `,`, - `}`, - }, "") - return s + return n } -func (this *SecretEnvSource) String() string { - if this == nil { - return "nil" + +func (m *PodStatus) Size() (n int) { + if m == nil { + return 0 } - s := 
strings.Join([]string{`&SecretEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretKeySelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&SecretKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretList) String() string { - if this == nil { - return "nil" + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&SecretList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SecretProjection) String() string { - if this == nil { - return "nil" + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&SecretProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretReference) String() string { - if this == nil { - return "nil" + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&SecretReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *SecretVolumeSource) String() string { - if this == nil { - return "nil" + l = len(m.NominatedNodeName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PodIPs) > 0 { + for _, e := range m.PodIPs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&SecretVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecurityContext) String() string { - if this 
== nil { - return "nil" + if len(m.EphemeralContainerStatuses) > 0 { + for _, e := range m.EphemeralContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&SecurityContext{`, - `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), "Capabilities", "Capabilities", 1) + `,`, - `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, - `}`, - }, "") - return s + return n } -func (this *SerializedReference) String() string { - if this == nil { - return "nil" + +func (m *PodStatusResult) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SerializedReference{`, - `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Service) String() string { - if this == nil { - return "nil" + +func (m *PodTemplate) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Service{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ServiceAccount) String() string { - if this == nil { - return "nil" + +func (m *PodTemplateList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceAccount{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccountList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceAccountList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), 
"ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceAccountTokenProjection) String() string { - if this == nil { - return "nil" + +func (m *PodTemplateSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceAccountTokenProjection{`, - `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, - `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ServiceList) String() string { - if this == nil { - return "nil" + +func (m *PortworxVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *ServicePort) String() string { - if this == nil { - return "nil" + +func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServicePort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceProxyOptions) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ServiceProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceSpec) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + +func (m *PreferAvoidPodsEntry) Size() (n int) { + if m == nil { + return 0 } - mapStringForSelector += "}" - s := strings.Join([]string{`&ServiceSpec{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, - `Selector:` + mapStringForSelector + `,`, - `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, - `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, - `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, - 
`LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, - `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, - `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, - `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, - `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, - `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ServiceStatus) String() string { - if this == nil { - return "nil" + +func (m *PreferredSchedulingTerm) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SessionAffinityConfig) String() string { - if this == nil { - return "nil" + +func (m *Probe) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SessionAffinityConfig{`, - `ClientIP:` + strings.Replace(fmt.Sprintf("%v", this.ClientIP), "ClientIPConfig", "ClientIPConfig", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n } -func (this *StorageOSPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ProjectedVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "ObjectReference", "ObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&StorageOSVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Sysctl) String() string { - if this == nil { - return "nil" + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } - s := 
strings.Join([]string{`&Sysctl{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s + return n } -func (this *TCPSocketAction) String() string { - if this == nil { - return "nil" + +func (m *QuobyteVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TCPSocketAction{`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tenant) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Taint) String() string { - if this == nil { - return "nil" + +func (m *RBDPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Taint{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n } -func (this *Toleration) String() string { - if this == nil { - return "nil" + +func (m *RBDVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Toleration{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n } -func (this *TopologySelectorLabelRequirement) String() string { - if this == nil { - return "nil" + +func (m *RangeAllocation) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *TopologySelectorTerm) String() string { - if this == nil { - return "nil" + +func (m *ReplicationController) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TopologySelectorTerm{`, - `MatchLabelExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchLabelExpressions), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *TypedLocalObjectReference) String() string { - if this == nil { - return "nil" + +func (m *ReplicationControllerCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TypedLocalObjectReference{`, - `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Volume) String() string { - if this == nil { - return "nil" + +func (m *ReplicationControllerList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Volume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *VolumeDevice) String() string { - if this == nil { - return "nil" + +func (m *ReplicationControllerSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeDevice{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n } -func (this *VolumeMount) String() string { - if this == nil { - return "nil" + +func (m *ReplicationControllerStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeMount{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, 
- `}`, - }, "") - return s + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *VolumeNodeAffinity) String() string { - if this == nil { - return "nil" + +func (m *ResourceFieldSelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeNodeAffinity{`, - `Required:` + strings.Replace(fmt.Sprintf("%v", this.Required), "NodeSelector", "NodeSelector", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeProjection) String() string { - if this == nil { - return "nil" + +func (m *ResourceQuota) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeProjection{`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, - `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ScopeSelector != nil { + l = m.ScopeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n 
+} + +func (m *ResourceRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScaleIOVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScopeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScopedResourceSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScopeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Secret) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 
len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SecretEnvSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretKeySelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 2 + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if m.ProcMount != nil { + l = len(*m.ProcMount) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SerializedReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range 
m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 2 + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountTokenProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + n += 2 + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPFamily != nil { + l = len(*m.IPFamily) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SessionAffinityConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientIP != nil { + l = m.ClientIP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m 
*StorageOSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Sysctl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Toleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *TopologySelectorLabelRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySelectorTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchLabelExpressions) > 0 { + for _, e := range m.MatchLabelExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySpreadConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.MaxSkew)) + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenUnsatisfiable) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypedLocalObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func (m *VolumeDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.MountPropagation != nil { + l = len(*m.MountPropagation) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.SubPathExpr) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Required != nil { + l = m.Required.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccountToken != nil { + l = m.ServiceAccountToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = 
m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WindowsSecurityContextOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GMSACredentialSpecName != nil { + l = len(*m.GMSACredentialSpecName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GMSACredentialSpec != nil { + l = len(*m.GMSACredentialSpec) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUserName != nil { + l = len(*m.RunAsUserName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(this.PodAffinity.String(), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(this.PodAntiAffinity.String(), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferAvoidPods := "[]PreferAvoidPodsEntry{" + for _, f := range this.PreferAvoidPods { + repeatedStringForPreferAvoidPods += strings.Replace(strings.Replace(f.String(), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferAvoidPods += "}" + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + 
repeatedStringForPreferAvoidPods + `,`, + `}`, + }, "") + return s +} +func (this *AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `Kind:` + valueToStringGenerated(this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFilePersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `ControllerPublishSecretRef:` + strings.Replace(this.ControllerPublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ControllerExpandSecretRef:` + strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = 
append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Capabilities) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderPersistentVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClientIPConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClientIPConfig{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, 
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ComponentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ComponentStatus{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMap) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForBinaryData := make([]string, 0, len(this.BinaryData)) + for k := range this.BinaryData { + keysForBinaryData = append(keysForBinaryData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + mapStringForBinaryData := "map[string][]byte{" + for _, k := range keysForBinaryData { + mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + } + mapStringForBinaryData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `BinaryData:` + mapStringForBinaryData + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ConfigMap{" + for _, f := range this.Items { + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapNodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), 
`&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(this.Waiting.String(), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(this.Running.String(), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(this.Terminated.String(), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + 
fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Started:` + valueToStringGenerated(this.Started) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), 
"Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(this.TargetRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" + } + repeatedStringForAddresses := "[]EndpointAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForNotReadyAddresses := "[]EndpointAddress{" + for _, f := range this.NotReadyAddresses { + repeatedStringForNotReadyAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForNotReadyAddresses += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + repeatedStringForAddresses + `,`, + `NotReadyAddresses:` + repeatedStringForNotReadyAddresses + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubsets := "[]EndpointSubset{" + for _, f := range this.Subsets { + repeatedStringForSubsets += strings.Replace(strings.Replace(f.String(), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + "," + } + repeatedStringForSubsets += "}" + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + repeatedStringForSubsets + `,`, + `}`, + }, "") + return s +} +func (this *EndpointsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Endpoints{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Endpoints", "Endpoints", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(this.ConfigMapRef.String(), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} 
+func (this *EnvVar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "EnvVarSource", "EnvVarSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EphemeralContainer{`, + `EphemeralContainerCommon:` + strings.Replace(strings.Replace(this.EphemeralContainerCommon.String(), "EphemeralContainerCommon", "EphemeralContainerCommon", 1), `&`, ``, 1) + `,`, + `TargetContainerName:` + fmt.Sprintf("%v", this.TargetContainerName) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainerCommon) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&EphemeralContainerCommon{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 
1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainers) String() string { + if this == nil { + return "nil" + } + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + s := strings.Join([]string{`&EphemeralContainers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(this.Related.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, + `}`, + }, "") + return s +} +func (this *FlexPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s +} +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + 
return "nil" + } + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForHTTPHeaders := "[]HTTPHeader{" + for _, f := range this.HTTPHeaders { + repeatedStringForHTTPHeaders += strings.Replace(strings.Replace(f.String(), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForHTTPHeaders += "}" + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + repeatedStringForHTTPHeaders + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Handler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostAlias) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostAlias{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, + `}`, + }, "") + return s +} +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + 
} + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *KeyToPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(this.PostStart.String(), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(this.PreStop.String(), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" + } + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + } + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin := "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + } + mapStringForMin += "}" + keysForDefault := 
make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + } + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + } + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LimitRange{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LimitRange", "LimitRange", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForLimits := "[]LimitRangeItem{" + for _, f := range this.Limits { + repeatedStringForLimits += strings.Replace(strings.Replace(f.String(), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + "," + } + repeatedStringForLimits += "}" + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + repeatedStringForLimits + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s +} 
+func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]LoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LocalVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Namespace{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NamespaceCondition", "NamespaceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := 
strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]PreferredSchedulingTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(this.RequiredDuringSchedulingIgnoredDuringExecution.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastHeartbeatTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigSource{`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigStatus{`, + `Assigned:` + strings.Replace(this.Assigned.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Active:` + strings.Replace(this.Active.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `LastKnownGood:` + strings.Replace(this.LastKnownGood.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDaemonEndpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + 
strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Node{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Node", "Node", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForNodeSelectorTerms := "[]NodeSelectorTerm{" + for _, f := range this.NodeSelectorTerms { + repeatedStringForNodeSelectorTerms += strings.Replace(strings.Replace(f.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForNodeSelectorTerms += "}" + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + repeatedStringForNodeSelectorTerms + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]NodeSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + repeatedStringForMatchFields := "[]NodeSelectorRequirement{" + for _, f := range this.MatchFields { + repeatedStringForMatchFields += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchFields += "}" + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `MatchFields:` + repeatedStringForMatchFields + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTaints := "[]Taint{" + for _, f := range this.Taints { + repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "Taint", "Taint", 1), `&`, ``, 
1) + "," + } + repeatedStringForTaints += "}" + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `DoNotUseExternalID:` + fmt.Sprintf("%v", this.DoNotUseExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, + `Taints:` + repeatedStringForTaints + `,`, + `ConfigSource:` + strings.Replace(this.ConfigSource.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `PodCIDRs:` + fmt.Sprintf("%v", this.PodCIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NodeCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForAddresses := "[]NodeAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForImages := "[]ContainerImage{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForVolumesAttached := "[]AttachedVolume{" + for _, f := range this.VolumesAttached { + repeatedStringForVolumesAttached += strings.Replace(strings.Replace(f.String(), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumesAttached += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + } + mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Addresses:` + repeatedStringForAddresses + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, + `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForItems := "[]PersistentVolumeClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PersistentVolumeClaimCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolume{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, + `Local:` + strings.Replace(this.Local.String(), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), 
"PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(this.ClaimRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `}`, + }, "") + return s +} +func (this *PodAntiAffinity) String() string 
{ + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *PodCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]PodDNSConfigOption{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(strings.Replace(f.String(), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + 
`,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *PodIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodIP{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `}`, + }, "") + return s +} +func (this *PodList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Pod{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pod", "Pod", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodReadinessGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodReadinessGate{`, + `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" + } + repeatedStringForSysctls := "[]Sysctl{" + for _, f := range this.Sysctls { + repeatedStringForSysctls += strings.Replace(strings.Replace(f.String(), "Sysctl", "Sysctl", 1), `&`, ``, 1) + "," + } + repeatedStringForSysctls += "}" + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `Sysctls:` + repeatedStringForSysctls + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSignature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", 
"v1.OwnerReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForContainers := "[]Container{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + repeatedStringForInitContainers := "[]Container{" + for _, f := range this.InitContainers { + repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForHostAliases := "[]HostAlias{" + for _, f := range this.HostAliases { + repeatedStringForHostAliases += strings.Replace(strings.Replace(f.String(), "HostAlias", "HostAlias", 1), `&`, ``, 1) + "," + } + repeatedStringForHostAliases += "}" + repeatedStringForReadinessGates := "[]PodReadinessGate{" + for _, f := range this.ReadinessGates { + repeatedStringForReadinessGates += strings.Replace(strings.Replace(f.String(), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + "," + } + repeatedStringForReadinessGates += "}" + repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" + for _, f := range this.TopologySpreadConstraints { + repeatedStringForTopologySpreadConstraints += strings.Replace(strings.Replace(f.String(), "TopologySpreadConstraint", "TopologySpreadConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForTopologySpreadConstraints += "}" + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + keysForOverhead := make([]string, 0, len(this.Overhead)) + for k := range this.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + mapStringForOverhead := "ResourceList{" + for _, k := range keysForOverhead { + mapStringForOverhead += fmt.Sprintf("%v: %v,", k, this.Overhead[ResourceName(k)]) + } + mapStringForOverhead += "}" + s := 
strings.Join([]string{`&PodSpec{`, + `Volumes:` + repeatedStringForVolumes + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "PodSecurityContext", "PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(this.Affinity.String(), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `HostAliases:` + repeatedStringForHostAliases + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(this.DNSConfig.String(), "PodDNSConfig", "PodDNSConfig", 1) + `,`, + `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, + `ReadinessGates:` + repeatedStringForReadinessGates + `,`, + `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `Overhead:` + mapStringForOverhead + `,`, + `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PodCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PodCondition", "PodCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForContainerStatuses := "[]ContainerStatus{" + for _, f := range this.ContainerStatuses { + repeatedStringForContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForContainerStatuses += "}" + repeatedStringForInitContainerStatuses := "[]ContainerStatus{" + for _, f := range this.InitContainerStatuses { + repeatedStringForInitContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainerStatuses += "}" + repeatedStringForPodIPs := "[]PodIP{" + for _, f := range this.PodIPs { + repeatedStringForPodIPs += 
strings.Replace(strings.Replace(f.String(), "PodIP", "PodIP", 1), `&`, ``, 1) + "," + } + repeatedStringForPodIPs += "}" + repeatedStringForEphemeralContainerStatuses := "[]ContainerStatus{" + for _, f := range this.EphemeralContainerStatuses { + repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainerStatuses += "}" + s := strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `ContainerStatuses:` + repeatedStringForContainerStatuses + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + repeatedStringForInitContainerStatuses + `,`, + `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, + `PodIPs:` + repeatedStringForPodIPs + `,`, + `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `}`, + }, "") + return s +} +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, + `EvictionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvictionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForSources := "[]VolumeProjection{" + for _, f := range this.Sources { + repeatedStringForSources += strings.Replace(strings.Replace(f.String(), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + "," + } + repeatedStringForSources += "}" + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + repeatedStringForSources + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, + `}`, + }, "") + return s +} +func (this *RBDPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDPersistentVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ReplicationController{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + 
`MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ReplicationControllerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Divisor), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + s := strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `ScopeSelector:` + strings.Replace(this.ScopeSelector.String(), "ScopeSelector", "ScopeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + 
} + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k := range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + } + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", 
this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScopeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]ScopedResourceSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + s := strings.Join([]string{`&ScopeSelector{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `}`, + }, "") + return s +} +func (this *ScopedResourceSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, + `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + } + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `}`, + }, "") + return s +} +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Secret{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Secret", "Secret", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" 
+ s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(this.Capabilities.String(), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SerializedReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + 
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" + } + repeatedStringForSecrets := "[]ObjectReference{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceAccount{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountTokenProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountTokenProjection{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Service{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Service", "Service", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this 
== nil { + return "nil" + } + repeatedStringForPorts := "[]ServicePort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + repeatedStringForPorts + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, + `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, + `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SessionAffinityConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionAffinityConfig{`, + `ClientIP:` + strings.Replace(this.ClientIP.String(), "ClientIPConfig", "ClientIPConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *Taint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorLabelRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchLabelExpressions := "[]TopologySelectorLabelRequirement{" + for _, f := range this.MatchLabelExpressions { + repeatedStringForMatchLabelExpressions += strings.Replace(strings.Replace(f.String(), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchLabelExpressions += "}" + s := strings.Join([]string{`&TopologySelectorTerm{`, + `MatchLabelExpressions:` + repeatedStringForMatchLabelExpressions + `,`, + `}`, + }, "") + return s +} +func (this *TopologySpreadConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySpreadConstraint{`, + `MaxSkew:` + fmt.Sprintf("%v", this.MaxSkew) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TypedLocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypedLocalObjectReference{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, 
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, + `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeAffinity{`, + `Required:` + strings.Replace(this.Required.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(this.EmptyDir.String(), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(this.GitRepo.String(), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), 
"DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(this.Projected.String(), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, + `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WindowsSecurityContextOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsSecurityContextOptions{`, + `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, + `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, + `RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvoidPods) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) + m.Kind = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerPublishSecretRef == nil { + m.ControllerPublishSecretRef = &SecretReference{} + } + if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeStageSecretRef == nil { + m.NodeStageSecretRef = &SecretReference{} + } + if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &SecretReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerExpandSecretRef == nil { + m.ControllerExpandSecretRef = &SecretReference{} + } + if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + 
} + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &LocalObjectReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&VolumeSource{`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, - `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, - `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", 
this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, - `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VsphereVirtualDiskVolumeSource) String() string { - if this == nil { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, - `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, - `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, - `}`, - }, "") - return s + return nil } -func (this *WeightedPodAffinityTerm) String() string { - if this == nil { - return "nil" +func (m *Capabilities) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&WeightedPodAffinityTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17310,7 +28908,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17318,15 +28916,15 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17338,7 +28936,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17348,14 +28946,17 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17367,7 +28968,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -17377,16 +28978,19 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - m.Partition = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17396,12 +29000,93 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } @@ -17415,7 +29100,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17430,6 +29115,9 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -17442,7 +29130,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Affinity) Unmarshal(dAtA []byte) error { +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l 
{ @@ -17457,7 +29145,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17465,17 +29153,17 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17485,30 +29173,61 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeAffinity == nil { - m.NodeAffinity = &NodeAffinity{} + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17518,28 +29237,59 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodAffinity == nil { - m.PodAffinity = &PodAffinity{} + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } - if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17551,7 +29301,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17560,16 +29310,39 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodAntiAffinity == nil { - m.PodAntiAffinity = &PodAntiAffinity{} + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} } - if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17579,6 +29352,9 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -17591,7 +29367,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } return nil } -func (m *AttachedVolume) Unmarshal(dAtA []byte) error { +func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17606,7 +29382,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17614,15 +29390,15 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen 
uint64 for shift := uint(0); ; shift += 7 { @@ -17634,7 +29410,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17644,14 +29420,17 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17663,7 +29442,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17673,64 +29452,37 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DevicePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AvoidPods) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ReadOnly = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17742,7 +29494,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17751,11 +29503,16 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferAvoidPods = 
append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) - if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17768,6 +29525,9 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -17780,7 +29540,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17795,7 +29555,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17803,15 +29563,15 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17823,7 +29583,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17833,14 +29593,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DiskName = string(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17852,7 +29615,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17862,16 +29625,19 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17881,27 +29647,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) - m.CachingMode = &s - iNdEx = postIndex + m.ReadOnly = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17911,48 +29667,86 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - b := bool(v != 0) - m.ReadOnly = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated } - var stringLen uint64 + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17962,22 +29756,12 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex + m.TimeoutSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17987,6 +29771,9 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -17999,7 +29786,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18014,7 +29801,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18022,15 +29809,15 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18042,7 +29829,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18052,14 +29839,17 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18071,7 +29861,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18081,16 +29871,19 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(dAtA[iNdEx:postIndex]) + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: 
- if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18100,15 +29893,27 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18120,7 +29925,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18130,11 +29935,13 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.SecretNamespace = &s + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -18145,6 +29952,9 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18157,7 +29967,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18172,7 +29982,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18180,17 +29990,17 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18200,26 +30010,30 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
(uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18229,41 +30043,26 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ReadOnly = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18273,6 +30072,9 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18285,7 +30087,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Binding) Unmarshal(dAtA []byte) error { +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18300,7 +30102,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18308,15 +30110,15 @@ func (m *Binding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Binding: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18328,7 +30130,7 @@ 
func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18337,16 +30139,19 @@ func (m *Binding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18358,7 +30163,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18367,10 +30172,14 @@ func (m *Binding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18383,6 +30192,9 @@ func (m *Binding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18395,7 +30207,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } return nil } -func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMap) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18410,7 +30222,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18418,122 +30230,15 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeHandle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18545,7 +30250,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18554,25 +30259,21 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var stringLenmapkey uint64 + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18582,26 +30283,29 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.VolumeAttributes == nil { 
- m.VolumeAttributes = make(map[string]string) + if m.Data == nil { + m.Data = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18611,45 +30315,90 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.VolumeAttributes[mapkey] = mapvalue - } else { - var mapvalue string - m.VolumeAttributes[mapkey] = mapvalue } + m.Data[mapkey] = mapvalue iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18661,7 +30410,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18670,81 +30419,110 @@ func (m 
*CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ControllerPublishSecretRef == nil { - m.ControllerPublishSecretRef = &SecretReference{} - } - if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeStageSecretRef == nil { - m.NodeStageSecretRef = &SecretReference{} - } - if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + if m.BinaryData == nil { + m.BinaryData = make(map[string][]byte) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &SecretReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.BinaryData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -18755,6 +30533,9 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18767,7 +30548,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Capabilities) Unmarshal(dAtA []byte) error { +func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18782,7 +30563,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18790,17 +30571,17 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18810,26 +30591,30 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18839,21 +30624,13 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx 
= preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18863,6 +30640,9 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18875,7 +30655,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18890,7 +30670,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18898,17 +30678,17 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18918,24 +30698,28 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18947,7 +30731,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18957,16 +30741,19 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -18976,26 +30763,71 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19005,24 +30837,28 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19034,7 +30870,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19043,36 +30879,17 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { 
- return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19082,6 +30899,9 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19094,7 +30914,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19109,7 +30929,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19117,15 +30937,15 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19137,7 +30957,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19147,14 +30967,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19166,7 +30989,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19176,14 +30999,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19195,7 +31021,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19205,14 +31031,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19224,7 +31053,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19234,16 +31063,19 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19253,45 +31085,24 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19301,6 +31112,9 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19313,7 +31127,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapProjection) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19328,7 +31142,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19336,17 +31150,17 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19356,26 +31170,30 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19385,24 +31203,29 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -19414,45 +31237,13 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19462,6 +31253,9 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19474,7 +31268,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19489,7 +31283,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19497,17 +31291,17 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19517,26 +31311,30 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19546,26 +31344,31 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19575,17 +31378,17 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + m.DefaultMode = &v case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19595,25 +31398,13 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19623,6 +31414,9 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19635,7 +31429,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { +func (m *Container) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19650,7 +31444,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19658,17 +31452,17 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + return fmt.Errorf("proto: Container: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19678,65 +31472,27 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- v |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19748,7 +31504,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19758,14 +31514,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19777,7 +31536,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19787,14 +31546,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19806,7 +31568,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-19816,14 +31578,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19835,7 +31600,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19845,64 +31610,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19914,7 +31632,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19923,16 +31641,20 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19944,7 +31666,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19953,67 +31675,53 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20025,7 +31733,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20034,16 +31742,20 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20055,7 +31767,7 @@ func (m *ComponentStatusList) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20064,67 +31776,58 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20136,7 +31839,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20145,18 +31848,24 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) } - var msglen int + 
var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20166,19 +31875,29 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20188,12 +31907,29 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20203,76 +31939,33 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Data == nil { - m.Data = make(map[string]string) + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Data[mapkey] = mapvalue - } else { - var mapvalue string - m.Data[mapkey] = mapvalue + if err := 
m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20282,19 +31975,17 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) } - var keykey uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20304,12 +31995,17 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20319,125 +32015,15 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.BinaryData == nil { - m.BinaryData = make(map[string][]byte) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - m.BinaryData[mapkey] = mapvalue - } else { - var mapvalue []byte - m.BinaryData[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TTY = bool(v != 0) + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20449,7 +32035,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20458,18 +32044,22 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20479,66 +32069,27 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20550,7 +32101,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20559,18 +32110,22 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20580,42 +32135,28 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - b := bool(v != 0) - m.Optional = &b + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20625,6 +32166,9 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -20637,7 +32181,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapList) Unmarshal(dAtA []byte) error { +func (m *ContainerImage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20652,7 +32196,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20660,17 +32204,17 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", 
fieldNum, wire) + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20680,27 +32224,29 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) } - var msglen int + m.SizeBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20710,23 +32256,11 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.SizeBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ConfigMap{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20736,6 +32270,9 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -20748,7 +32285,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { +func (m *ContainerPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20763,7 +32300,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20771,15 +32308,15 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20791,7 
+32328,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20801,16 +32338,19 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) } - var stringLen uint64 + m.HostPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20820,26 +32360,16 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.HostPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) } - var stringLen uint64 + m.ContainerPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20849,24 +32379,14 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.ContainerPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20878,7 +32398,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20888,14 +32408,17 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20907,7 +32430,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -20917,10 +32440,13 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) + m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -20931,6 +32457,9 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -20943,7 +32472,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { +func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20958,7 +32487,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20966,15 +32495,15 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20986,7 +32515,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20995,16 +32524,22 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21016,7 +32551,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21025,19 +32560,24 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Running == nil { + m.Running = 
&ContainerStateRunning{} + } + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21047,13 +32587,28 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21063,6 +32618,9 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21075,7 +32633,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21090,7 +32648,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21098,15 +32656,15 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21118,7 +32676,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21127,85 +32685,16 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21215,6 +32704,9 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21227,7 +32719,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Container) Unmarshal(dAtA []byte) error { +func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21242,7 +32734,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21250,46 +32742,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Container: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) } - var stringLen uint64 + m.ExitCode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ 
-21299,26 +32762,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.ExitCode |= int32(b&0x7F) << shift if b < 0x80 { break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } } - var stringLen uint64 + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21328,24 +32781,14 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Signal |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21357,7 +32800,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21367,14 +32810,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21386,7 +32832,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21396,14 +32842,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.WorkingDir = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21415,7 +32864,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21424,17 +32873,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, ContainerPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21446,7 +32897,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21455,19 +32906,21 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21477,58 +32930,82 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - if msglen < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - iNdEx = postIndex - case 10: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21538,30 +33015,29 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LivenessProbe == nil { - m.LivenessProbe = &Probe{} - } - if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21571,61 +33047,80 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &Probe{} - } - if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - if msglen < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Lifecycle == nil { - m.Lifecycle = &Lifecycle{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := 
m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 13: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21637,7 +33132,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21647,16 +33142,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21666,24 +33164,28 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 15: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21695,7 +33197,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21704,19 +33206,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &SecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 16: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) } var v int for shift 
:= uint(0); ; shift += 7 { @@ -21728,17 +33230,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Stdin = bool(v != 0) - case 17: + m.Ready = bool(v != 0) + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) } - var v int + m.RestartCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21748,17 +33250,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + m.RestartCount |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.StdinOnce = bool(v != 0) - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21768,17 +33269,29 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TTY = bool(v != 0) - case 19: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21788,26 +33301,27 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ImageID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 20: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21819,7 +33333,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21829,16 +33343,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + m.ContainerID = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21848,23 +33365,13 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) - if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Started = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21874,6 +33381,9 @@ func (m *Container) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21886,7 +33396,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerImage) Unmarshal(dAtA []byte) error { +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21901,7 +33411,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21909,46 +33419,17 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - m.SizeBytes = 0 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21958,7 +33439,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SizeBytes |= (int64(b) & 0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -21972,6 +33453,9 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21984,7 +33468,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerPort) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21999,7 +33483,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22007,17 +33491,17 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22027,116 +33511,25 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) - } - m.HostPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HostPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) - } - m.ContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ContainerPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -22147,6 +33540,9 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22159,7 +33555,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerState) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22174,7 +33570,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22182,17 +33578,17 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22202,28 +33598,27 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Waiting == nil { - m.Waiting = &ContainerStateWaiting{} - } - if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22235,7 +33630,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22244,19 +33639,22 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } - if m.Running == nil { - m.Running = &ContainerStateRunning{} + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} } - if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22268,7 +33666,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22277,16 +33675,39 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Terminated == nil { - m.Terminated = &ContainerStateTerminated{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22296,6 +33717,9 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22308,7 +33732,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22323,7 +33747,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22331,15 +33755,15 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22351,7 +33775,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-22360,13 +33784,37 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22376,6 +33824,9 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22388,7 +33839,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22403,7 +33854,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22411,17 +33862,17 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) } - m.ExitCode = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22431,35 +33882,29 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitCode |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 
{ - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22469,24 +33914,84 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + if m.SizeLimit == nil { + m.SizeLimit = &resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22498,7 +34003,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22508,14 +34013,17 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22527,7 +34035,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22536,18 +34044,24 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22557,25 +34071,27 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22587,7 +34103,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22597,10 +34113,14 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -22611,6 +34131,9 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22623,7 +34146,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { +func (m *EndpointPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22638,7 +34161,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22646,15 +34169,15 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22666,7 +34189,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22676,14 +34199,36 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22695,7 +34240,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22705,10 +34250,13 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -22719,6 +34267,9 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22731,7 +34282,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStatus) Unmarshal(dAtA []byte) error { +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22746,7 +34297,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22754,17 +34305,17 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 
64 { return ErrIntOverflowGenerated @@ -22774,24 +34325,29 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22803,7 +34359,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22812,16 +34368,20 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22833,7 +34393,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22842,86 +34402,75 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Ready = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - m.RestartCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RestartCount |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + if 
skippy < 0 { + return ErrInvalidLengthGenerated } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22931,26 +34480,30 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ImageID = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22960,20 +34513,25 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = 
preIndex @@ -22984,6 +34542,9 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22996,7 +34557,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { +func (m *EndpointsList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23011,7 +34572,7 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23019,17 +34580,50 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - m.Port = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23039,11 +34633,26 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23053,6 +34662,9 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23065,7 +34677,7 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23080,7 +34692,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + 
wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23088,15 +34700,47 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23108,7 +34752,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23117,11 +34761,52 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23134,6 +34819,9 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23146,7 +34834,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { +func (m *EnvVar) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23161,7 +34849,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23169,15 +34857,15 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23189,7 +34877,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23199,16 +34887,19 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23218,28 +34909,27 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23251,7 +34941,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23260,36 +34950,19 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23299,6 +34972,9 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23311,7 +34987,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23326,7 +35002,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23334,15 +35010,15 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23354,7 +35030,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23363,19 +35039,24 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23385,12 +35066,100 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.DefaultMode = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23400,6 +35169,9 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23412,7 +35184,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23427,7 +35199,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23435,17 +35207,17 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerCommon", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23455,26 +35227,30 @@ 
func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + if err := m.EphemeralContainerCommon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetContainerName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23484,24 +35260,23 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SizeLimit == nil { - m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TargetContainerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -23512,6 +35287,9 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23524,7 +35302,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointAddress) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23539,7 +35317,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23547,15 +35325,15 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainerCommon: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainerCommon: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23567,7 +35345,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23577,16 +35355,19 @@ func (m *EndpointAddress) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23596,28 +35377,27 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} - } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23629,7 +35409,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23639,14 +35419,17 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23658,7 +35441,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23668,65 +35451,17 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointPort) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23738,7 +35473,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23748,35 +35483,19 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.WorkingDir = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23786,74 +35505,29 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") - } - if fieldNum <= 0 
{ - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23865,7 +35539,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23874,17 +35548,20 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23896,7 +35573,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23905,17 +35582,19 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23927,7 +35606,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23936,67 +35615,20 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, EndpointPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Endpoints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < 
l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24008,7 +35640,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24017,16 +35649,22 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24038,7 +35676,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24047,67 +35685,22 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = 
postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24119,7 +35712,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24128,18 +35721,24 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24149,76 +35748,27 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24230,7 +35780,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24240,14 +35790,17 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Prefix = string(dAtA[iNdEx:postIndex]) + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24259,7 +35812,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24268,19 +35821,82 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24292,7 +35908,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24301,69 +35917,20 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretEnvSource{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24375,7 +35942,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24385,16 +35952,19 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24404,24 +35974,29 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24433,7 +36008,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24442,13 +36017,16 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if 
m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24461,6 +36039,9 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24473,7 +36054,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVarSource) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24488,7 +36069,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24496,15 +36077,15 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainers: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainers: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24516,7 +36097,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24525,52 +36106,19 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24582,7 +36130,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen 
|= int(b&0x7F) << shift if b < 0x80 { break } @@ -24591,46 +36139,14 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} - } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24643,6 +36159,9 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24670,7 +36189,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24698,7 +36217,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24707,6 +36226,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -24728,7 +36250,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24737,6 +36259,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -24758,7 +36283,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24768,6 +36293,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -24787,7 +36315,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24797,6 +36325,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-24816,7 +36347,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24825,6 +36356,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -24846,7 +36380,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24855,6 +36389,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -24876,7 +36413,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24885,6 +36422,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -24906,7 +36446,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -24925,7 +36465,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24935,6 +36475,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -24954,7 +36497,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24963,6 +36506,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -24984,7 +36530,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24993,6 +36539,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25017,7 +36566,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25027,6 +36576,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25046,7 +36598,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25055,6 +36607,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25079,7 +36634,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25089,6 +36644,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25108,7 +36666,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25118,6 +36676,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25132,6 +36693,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25159,7 +36723,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25187,7 +36751,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25196,6 +36760,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25217,7 +36784,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25226,6 +36793,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25243,6 +36813,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25270,7 +36843,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25298,7 +36871,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -25317,7 +36890,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25326,6 +36899,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25347,7 +36923,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25357,6 +36933,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25371,6 +36950,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25398,7 +36980,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25426,7 +37008,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25436,6 +37018,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25455,7 +37040,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25465,6 +37050,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25479,6 +37067,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25506,7 +37097,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25534,7 +37125,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25544,6 +37135,9 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25558,6 +37152,9 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25585,7 +37182,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25613,7 +37210,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25623,6 +37220,9 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } 
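
Note on the recurring pattern in the hunks above and below: the regenerated unmarshalers (a) fold the varint mask into the byte before widening, i.e. `uint64(b&0x7F) << shift` instead of `(uint64(b) & 0x7F) << shift`, (b) add `postIndex < 0` / `(iNdEx + skippy) < 0` guards so a length that overflows int is rejected before the usual end-of-buffer comparison, and (c) replace the old single-pass map-entry decoding with a loop keyed on the entry's own field numbers. The standalone sketch below shows the same varint-plus-bounds-check idea in isolation; the helper names (decodeVarint, readBytes) are illustrative only and are not part of the vendored file.

package main

import (
	"errors"
	"fmt"
)

var (
	errIntOverflow   = errors.New("integer overflow")
	errInvalidLength = errors.New("invalid length")
	errUnexpectedEOF = errors.New("unexpected EOF")
)

// decodeVarint reads one protobuf varint starting at index i, using the
// mask-then-widen form (uint64(b&0x7F) << shift) that these hunks switch to.
func decodeVarint(data []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if i >= len(data) {
			return 0, 0, errUnexpectedEOF
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

// readBytes mirrors the added bounds checks: a negative end index, which can
// only come from an overflowing length, is rejected before the end-of-buffer
// comparison.
func readBytes(data []byte, i, length int) ([]byte, int, error) {
	if length < 0 {
		return nil, 0, errInvalidLength
	}
	post := i + length
	if post < 0 { // the "postIndex < 0" guard added throughout this file
		return nil, 0, errInvalidLength
	}
	if post > len(data) {
		return nil, 0, errUnexpectedEOF
	}
	return data[i:post], post, nil
}

func main() {
	// Field 1, wire type 2 (length-delimited), length 2, payload "hi".
	data := []byte{0x0a, 0x02, 'h', 'i'}
	tag, i, err := decodeVarint(data, 0)
	if err != nil {
		panic(err)
	}
	length, i, err := decodeVarint(data, i)
	if err != nil {
		panic(err)
	}
	payload, _, err := readBytes(data, i, int(length))
	if err != nil {
		panic(err)
	}
	fmt.Printf("field=%d wireType=%d value=%q\n", tag>>3, tag&0x7, payload)
}

In the generated code the analogous guard keeps a wrapped length from reaching the dAtA[iNdEx:postIndex] slice expression, where a negative postIndex would otherwise panic rather than return ErrInvalidLengthGenerated.
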
@@ -25642,7 +37242,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -25662,7 +37262,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25672,6 +37272,9 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25691,7 +37294,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25711,7 +37314,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25721,6 +37324,9 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25735,6 +37341,9 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25762,7 +37371,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25790,7 +37399,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25800,6 +37409,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25819,7 +37431,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25829,6 +37441,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25848,7 +37463,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25857,6 +37472,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25881,7 +37499,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25901,7 +37519,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25910,54 +37528,20 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25967,41 +37551,86 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 
0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue } + m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -26012,6 +37641,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26039,7 +37671,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26067,7 +37699,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26077,6 +37709,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26096,7 +37731,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26106,6 +37741,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26125,7 +37763,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26134,6 +37772,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26158,7 +37799,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26178,7 +37819,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26187,10 +37828,168 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26200,12 +37999,29 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26215,71 +38031,180 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Options == nil { - m.Options = make(map[string]string) + m.DatasetUUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PDName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -26289,6 +38214,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26301,7 +38229,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { +func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26316,7 +38244,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26324,15 +38252,15 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26344,7 +38272,7 @@ func (m *FlockerVolumeSource) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26354,14 +38282,17 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DatasetName = string(dAtA[iNdEx:postIndex]) + m.Repository = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26373,7 +38304,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26383,93 +38314,17 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DatasetUUID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.PDName = string(dAtA[iNdEx:postIndex]) + m.Revision = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26481,7 +38336,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26491,50 +38346,14 @@ func (m 
*GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Directory = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -26544,6 +38363,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26556,7 +38378,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { +func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26571,7 +38393,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26579,15 +38401,15 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26599,7 +38421,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26609,14 +38431,17 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Repository = string(dAtA[iNdEx:postIndex]) + m.EndpointsName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + return fmt.Errorf("proto: wrong wireType = %d for 
field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26628,7 +38453,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26638,14 +38463,37 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Revision = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsNamespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26657,7 +38505,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26667,10 +38515,14 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Directory = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.EndpointsNamespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -26681,6 +38533,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26708,7 +38563,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26736,7 +38591,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26746,6 +38601,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26765,7 +38623,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26775,6 +38633,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26794,7 +38655,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if 
b < 0x80 { break } @@ -26809,6 +38670,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26836,7 +38700,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26864,7 +38728,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26874,6 +38738,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26893,7 +38760,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26902,6 +38769,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26923,7 +38793,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26933,6 +38803,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26952,7 +38825,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26962,6 +38835,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26981,7 +38857,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26990,6 +38866,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27007,6 +38886,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27034,7 +38916,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27062,7 +38944,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27072,6 +38954,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27091,7 +38976,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27101,6 +38986,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27115,6 +39003,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27142,7 +39033,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27170,7 +39061,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27179,6 +39070,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27203,7 +39097,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27212,6 +39106,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27236,7 +39133,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27245,6 +39142,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27264,6 +39164,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27291,7 +39194,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27319,7 +39222,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27329,6 +39232,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27348,7 +39254,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27358,6 +39264,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated 
} postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27372,6 +39281,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27399,7 +39311,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27427,7 +39339,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27437,6 +39349,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27456,7 +39371,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27466,6 +39381,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27481,6 +39399,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27508,7 +39429,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27536,7 +39457,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27546,6 +39467,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27565,7 +39489,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27575,6 +39499,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27594,7 +39521,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -27613,7 +39540,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27623,6 +39550,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27642,7 +39572,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27652,6 +39582,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27671,7 +39604,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27691,7 +39624,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27701,6 +39634,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27720,7 +39656,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27740,7 +39676,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27749,6 +39685,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27773,7 +39712,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27793,7 +39732,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27803,6 +39742,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27818,6 +39760,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27845,7 +39790,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27873,7 +39818,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27883,6 +39828,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27902,7 +39850,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27912,6 +39860,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27931,7 +39882,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -27950,7 +39901,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27960,6 +39911,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27979,7 +39933,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27989,6 +39943,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28008,7 +39965,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28028,7 +39985,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28038,6 +39995,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28057,7 +40017,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28077,7 +40037,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28086,6 +40046,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28110,7 +40073,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28130,7 +40093,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28140,6 +40103,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) 
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28155,6 +40121,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28182,7 +40151,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28210,7 +40179,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28220,6 +40189,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28239,7 +40211,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28249,6 +40221,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28268,7 +40243,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28283,6 +40258,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28310,7 +40288,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28338,7 +40316,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28347,6 +40325,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28371,7 +40352,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28380,6 +40361,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28399,6 +40383,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28426,7 +40413,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28454,7 +40441,7 @@ func (m *LimitRange) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28463,6 +40450,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28484,7 +40474,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28493,6 +40483,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28509,6 +40502,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28536,7 +40532,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28564,7 +40560,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28574,6 +40570,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28593,7 +40592,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28602,54 +40601,20 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Max == nil { m.Max = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28659,46 +40624,88 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << 
shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Max[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Max[ResourceName(mapkey)] = mapvalue } + m.Max[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -28714,7 +40721,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28723,54 +40730,20 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if 
postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Min == nil { m.Min = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28780,46 +40753,88 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Min[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Min[ResourceName(mapkey)] = mapvalue } + m.Min[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -28835,7 +40850,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28844,54 +40859,20 @@ func (m *LimitRangeItem) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Default == nil { m.Default = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28901,46 +40882,88 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return 
ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Default[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Default[ResourceName(mapkey)] = mapvalue } + m.Default[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 5: if wireType != 2 { @@ -28956,7 +40979,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28965,54 +40988,20 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DefaultRequest == nil { m.DefaultRequest = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29022,46 +41011,88 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DefaultRequest[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.DefaultRequest[ResourceName(mapkey)] = mapvalue } + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -29077,7 +41108,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29086,54 +41117,20 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.MaxLimitRequestRatio == nil { m.MaxLimitRequestRatio = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29143,46 +41140,88 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue } + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -29193,6 +41232,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29220,7 +41262,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29248,7 +41290,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29257,6 +41299,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29278,7 +41323,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29287,6 +41332,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-29304,6 +41352,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29331,7 +41382,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29359,7 +41410,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29368,6 +41419,9 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29385,6 +41439,9 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29412,7 +41469,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29440,7 +41497,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29449,6 +41506,9 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29470,7 +41530,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29479,10 +41539,13 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -29496,6 +41559,9 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29523,7 +41589,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29551,7 +41617,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29561,6 +41627,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29580,7 +41649,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b 
< 0x80 { break } @@ -29590,6 +41659,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29604,6 +41676,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29631,7 +41706,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29659,7 +41734,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29668,6 +41743,9 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29685,6 +41763,9 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29712,7 +41793,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29740,7 +41821,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29750,6 +41831,9 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29764,6 +41848,9 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29791,7 +41878,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29819,7 +41906,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29829,6 +41916,9 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29848,7 +41938,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29858,6 +41948,9 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } 
@@ -29873,6 +41966,9 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29900,7 +41996,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29928,7 +42024,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29938,6 +42034,9 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29957,7 +42056,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29967,6 +42066,9 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29986,7 +42088,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30001,6 +42103,9 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30028,7 +42133,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30056,7 +42161,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30065,6 +42170,9 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30086,7 +42194,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30095,6 +42203,9 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30116,7 +42227,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30125,6 +42236,9 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30141,6 +42255,223 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NamespaceConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30168,7 +42499,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30196,7 +42527,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30205,6 +42536,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30226,7 +42560,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30235,6 +42569,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30252,6 +42589,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30279,7 +42619,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30307,7 +42647,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30317,6 +42657,9 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30331,6 +42674,9 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30358,7 +42704,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30386,7 
+42732,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30396,11 +42742,48 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Phase = NamespacePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NamespaceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30410,6 +42793,9 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30437,7 +42823,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30465,7 +42851,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30474,6 +42860,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30495,7 +42884,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30504,6 +42893,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30525,7 +42917,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30534,6 +42926,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30550,6 +42945,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30577,7 +42975,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30605,7 
+43003,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30615,6 +43013,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30634,7 +43035,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30644,6 +43045,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30658,6 +43062,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30685,7 +43092,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30713,7 +43120,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30722,6 +43129,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30746,7 +43156,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30755,6 +43165,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30772,6 +43185,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30799,7 +43215,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30827,7 +43243,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30837,6 +43253,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30856,7 +43275,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30866,6 +43285,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30885,7 +43307,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30894,6 +43316,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30915,7 +43340,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30924,6 +43349,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30945,7 +43373,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30955,6 +43383,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30974,7 +43405,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30984,6 +43415,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30998,6 +43432,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31025,7 +43462,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31053,7 +43490,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31062,6 +43499,9 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31081,6 +43521,9 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31108,7 +43551,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31136,7 +43579,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31145,6 +43588,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) 
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31169,7 +43615,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31178,6 +43624,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31202,7 +43651,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31211,6 +43660,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31235,7 +43687,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31245,6 +43697,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31259,6 +43714,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31286,7 +43744,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31314,7 +43772,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31323,6 +43781,9 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31339,6 +43800,9 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31366,7 +43830,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31394,7 +43858,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31403,6 +43867,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31424,7 +43891,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 
{ break } @@ -31433,6 +43900,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31450,6 +43920,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31477,7 +43950,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31505,7 +43978,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31515,6 +43988,9 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31529,6 +44005,9 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31556,7 +44035,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31584,7 +44063,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31593,54 +44072,20 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31650,46 +44095,88 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -31700,6 +44187,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31727,7 +44217,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31755,7 +44245,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31764,6 +44254,9 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31781,6 +44274,9 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -31808,7 +44304,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31836,7 +44332,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31846,6 +44342,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31865,7 +44364,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31875,6 +44374,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31894,7 +44396,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31904,6 +44406,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31918,6 +44423,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31945,7 +44453,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31973,7 +44481,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31982,6 +44490,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32004,7 +44515,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32013,6 +44524,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32030,6 +44544,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32057,7 +44574,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32085,7 +44602,7 @@ func (m 
*NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32095,6 +44612,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32102,7 +44622,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DoNotUse_ExternalID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DoNotUseExternalID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32114,7 +44634,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32124,10 +44644,13 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DoNotUse_ExternalID = string(dAtA[iNdEx:postIndex]) + m.DoNotUseExternalID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -32143,7 +44666,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32153,6 +44676,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32172,7 +44698,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32192,7 +44718,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32201,6 +44727,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32223,7 +44752,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32232,6 +44761,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32242,6 +44774,38 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDRs = append(m.PodCIDRs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32251,6 +44815,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32278,7 +44845,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32306,7 +44873,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32315,54 +44882,20 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32372,46 +44905,88 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -32427,7 +45002,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32436,54 +45011,20 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Allocatable == nil { m.Allocatable = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32493,46 +45034,88 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Allocatable[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Allocatable[ResourceName(mapkey)] = mapvalue } + m.Allocatable[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -32548,7 +45131,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32558,6 +45141,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32577,7 +45163,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32586,6 +45172,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32608,7 +45197,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32617,6 +45206,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} if postIndex > l { return io.ErrUnexpectedEOF } @@ -32639,7 +45231,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32648,6 +45240,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32669,7 +45264,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32678,6 +45273,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32699,7 +45297,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32708,6 +45306,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32730,7 +45331,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32740,6 +45341,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32759,7 +45363,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32768,6 +45372,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32790,7 +45397,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32799,6 +45406,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32818,6 +45428,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32845,7 +45458,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32873,7 +45486,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32883,6 +45496,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32902,7 +45518,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32912,6 +45528,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32931,7 +45550,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32941,6 +45560,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32960,7 +45582,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32970,6 +45592,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32989,7 +45614,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32999,6 +45624,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33018,7 +45646,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33028,6 +45656,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33047,7 +45678,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33057,6 +45688,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33076,7 +45710,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33086,6 +45720,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33105,7 +45742,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b 
< 0x80 { break } @@ -33115,6 +45752,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33134,7 +45774,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33144,6 +45784,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33158,6 +45801,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33185,7 +45831,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33213,7 +45859,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33223,6 +45869,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33242,7 +45891,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33252,6 +45901,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33266,6 +45918,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33293,7 +45948,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33321,7 +45976,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33331,6 +45986,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33350,7 +46008,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33360,6 +46018,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF 
} @@ -33379,7 +46040,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33389,6 +46050,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33408,7 +46072,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33418,6 +46082,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33437,7 +46104,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33447,6 +46114,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33466,7 +46136,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33476,6 +46146,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33495,7 +46168,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33505,6 +46178,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33519,6 +46195,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33546,7 +46225,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33574,7 +46253,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33583,6 +46262,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33604,7 +46286,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33613,6 +46295,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33634,7 +46319,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33643,6 +46328,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33659,6 +46347,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33686,7 +46377,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33714,7 +46405,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33723,6 +46414,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33744,7 +46438,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33753,6 +46447,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33774,7 +46471,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33783,6 +46480,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33799,6 +46499,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33826,7 +46529,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33854,7 +46557,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33864,6 +46567,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33883,7 +46589,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33893,6 +46599,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33912,7 +46621,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33921,6 +46630,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33942,7 +46654,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33951,6 +46663,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33972,7 +46687,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33982,6 +46697,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34001,7 +46719,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34011,6 +46729,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34025,6 +46746,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34052,7 +46776,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34080,7 +46804,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34089,6 +46813,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34110,7 +46837,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34119,6 +46846,9 @@ func 
(m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34136,6 +46866,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34163,7 +46896,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34191,7 +46924,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34201,6 +46934,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34220,7 +46956,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34229,6 +46965,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34250,7 +46989,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34260,6 +46999,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34279,7 +47021,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34288,11 +47030,14 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -34312,7 +47057,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34322,6 +47067,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34342,7 +47090,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } 
@@ -34352,6 +47100,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34372,7 +47123,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34381,6 +47132,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34400,6 +47154,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34427,7 +47184,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34455,7 +47212,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34465,6 +47222,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34484,7 +47244,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34494,6 +47254,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34513,7 +47276,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34522,54 +47285,20 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if 
m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34579,46 +47308,88 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -34634,7 +47405,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34643,6 +47414,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34660,6 +47434,9 @@ func (m 
*PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34687,7 +47464,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34715,7 +47492,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34725,6 +47502,9 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34744,7 +47524,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34759,6 +47539,9 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34786,7 +47569,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34814,7 +47597,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34823,6 +47606,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34844,7 +47630,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34853,6 +47639,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34870,6 +47659,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34897,7 +47689,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34925,7 +47717,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34934,6 +47726,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34958,7 +47753,7 @@ func (m 
*PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34967,6 +47762,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34991,7 +47789,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35000,6 +47798,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35024,7 +47825,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35033,11 +47834,14 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Glusterfs == nil { - m.Glusterfs = &GlusterfsVolumeSource{} + m.Glusterfs = &GlusterfsPersistentVolumeSource{} } if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -35057,7 +47861,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35066,6 +47870,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35090,7 +47897,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35099,6 +47906,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35123,7 +47933,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35132,6 +47942,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35156,7 +47969,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35165,6 +47978,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35189,7 +48005,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen 
|= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35198,6 +48014,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35222,7 +48041,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35231,6 +48050,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35255,7 +48077,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35264,6 +48086,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35288,7 +48113,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35297,6 +48122,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35321,7 +48149,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35330,6 +48158,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35354,7 +48185,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35363,6 +48194,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35387,7 +48221,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35396,6 +48230,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35420,7 +48257,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35429,6 +48266,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} if postIndex > l { return io.ErrUnexpectedEOF } @@ -35453,7 +48293,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35462,6 +48302,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35486,7 +48329,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35495,6 +48338,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35519,7 +48365,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35528,6 +48374,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35552,7 +48401,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35561,6 +48410,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35585,7 +48437,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35594,6 +48446,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35618,7 +48473,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35627,6 +48482,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35646,6 +48504,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35673,7 +48534,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35701,7 +48562,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35710,54 +48571,20 @@ func (m 
*PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35767,46 +48594,88 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := 
iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -35822,7 +48691,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35831,6 +48700,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35852,7 +48724,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35862,6 +48734,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35881,7 +48756,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35890,6 +48765,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35914,7 +48792,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35924,6 +48802,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35943,7 +48824,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35953,6 +48834,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35972,7 +48856,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35982,6 +48866,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36001,7 +48888,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36011,6 +48898,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36031,7 +48921,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36040,6 +48930,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36059,6 +48952,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36086,7 +48982,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36114,7 +49010,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36124,6 +49020,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36143,7 +49042,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36153,6 +49052,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36172,7 +49074,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36182,6 +49084,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36196,6 +49101,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36223,7 +49131,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36251,7 +49159,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36261,6 +49169,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36280,7 +49191,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36290,6 +49201,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36304,6 +49218,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36331,7 +49248,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36359,7 +49276,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36368,6 +49285,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36389,7 +49309,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36398,6 +49318,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36419,7 +49342,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36428,6 +49351,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36444,6 +49370,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36471,7 +49400,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36499,7 +49428,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36508,6 +49437,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36530,7 +49462,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36539,6 +49471,9 @@ func (m *PodAffinity) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36556,6 +49491,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36583,7 +49521,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36611,7 +49549,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36620,11 +49558,14 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.LabelSelector = &v1.LabelSelector{} } if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -36644,7 +49585,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36654,6 +49595,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36673,7 +49617,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36683,6 +49627,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36697,6 +49644,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36724,7 +49674,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36752,7 +49702,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36761,6 +49711,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36783,7 +49736,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36792,6 +49745,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36809,6 +49765,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36836,7 +49795,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36864,7 +49823,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36884,7 +49843,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36904,7 +49863,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36924,7 +49883,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36944,7 +49903,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36954,6 +49913,9 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36968,6 +49930,9 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36995,7 +49960,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37023,7 +49988,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37033,6 +49998,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37052,7 +50020,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37062,6 +50030,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37081,7 +50052,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37090,6 +50061,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated 
+ } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37111,7 +50085,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37120,6 +50094,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37141,7 +50118,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37151,6 +50128,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37170,7 +50150,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37180,6 +50160,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37194,6 +50177,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37221,7 +50207,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37249,7 +50235,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37259,6 +50245,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37278,7 +50267,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37288,6 +50277,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37307,7 +50299,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37316,6 +50308,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37333,6 +50328,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37360,7 +50358,7 @@ func (m *PodDNSConfigOption) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37388,7 +50386,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37398,6 +50396,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37417,7 +50418,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37427,6 +50428,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37442,6 +50446,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37469,7 +50476,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37497,7 +50504,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37517,7 +50524,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37537,7 +50544,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37557,7 +50564,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37577,7 +50584,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37587,6 +50594,9 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37606,7 +50616,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37616,6 +50626,9 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37630,6 +50643,94 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > 
l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37657,7 +50758,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37685,7 +50786,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37694,6 +50795,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37715,7 +50819,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37724,6 +50828,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37741,6 +50848,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37768,7 +50878,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37796,7 +50906,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37806,6 +50916,9 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37825,7 +50938,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37845,7 +50958,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37865,7 +50978,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -37885,7 +50998,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37894,11 +51007,14 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.SinceTime = &v1.Time{} } if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -37918,7 +51034,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37938,7 +51054,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -37958,12 +51074,32 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } } m.LimitBytes = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InsecureSkipTLSVerifyBackend = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -37973,6 +51109,9 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38000,7 +51139,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38026,7 +51165,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -38043,7 +51182,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38052,9 +51191,23 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } + var 
elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]int32, 0, elementCount) + } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { @@ -38066,7 +51219,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -38085,6 +51238,9 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38112,7 +51268,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38140,7 +51296,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38150,6 +51306,9 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38164,6 +51323,9 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38191,7 +51353,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38219,7 +51381,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38229,6 +51391,9 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38243,6 +51408,9 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38270,7 +51438,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38298,7 +51466,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38307,6 +51475,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38331,7 +51502,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38351,7 +51522,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38370,7 +51541,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38387,7 +51558,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38396,9 +51567,23 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } for iNdEx < postIndex { var v int64 for shift := uint(0); ; shift += 7 { @@ -38410,7 +51595,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38434,7 +51619,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38454,7 +51639,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38474,7 +51659,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38483,6 +51668,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38491,6 +51679,42 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -38500,6 +51724,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38527,7 +51754,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire 
|= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38555,7 +51782,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38564,11 +51791,14 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodController == nil { - m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} + m.PodController = &v1.OwnerReference{} } if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38583,6 +51813,9 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38610,7 +51843,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38638,7 +51871,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38647,6 +51880,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38669,7 +51905,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38678,6 +51914,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38700,7 +51939,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38710,6 +51949,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38729,7 +51971,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38749,7 +51991,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38769,7 +52011,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38779,6 +52021,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38798,7 +52043,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38807,54 +52052,20 @@ func (m 
*PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.NodeSelector == nil { m.NodeSelector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38864,41 +52075,86 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := 
int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.NodeSelector[mapkey] = mapvalue - } else { - var mapvalue string - m.NodeSelector[mapkey] = mapvalue } + m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -38914,7 +52170,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38924,6 +52180,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38943,7 +52202,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38953,6 +52212,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38972,7 +52234,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38982,6 +52244,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39001,7 +52266,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39021,7 +52286,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39041,7 +52306,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39061,7 +52326,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39070,6 +52335,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39094,7 +52362,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39103,6 +52371,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39125,7 +52396,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39135,6 +52406,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error 
{ return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39154,7 +52428,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39164,6 +52438,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39183,7 +52460,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39192,6 +52469,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39216,7 +52496,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39226,6 +52506,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39245,7 +52528,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39254,6 +52537,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39276,7 +52562,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39297,7 +52583,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39306,6 +52592,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39328,7 +52617,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39337,6 +52626,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39359,7 +52651,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39369,6 +52661,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39388,7 +52683,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -39408,7 +52703,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39417,6 +52712,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39441,7 +52739,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39462,7 +52760,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39471,6 +52769,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39493,7 +52794,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39503,12 +52804,266 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.RuntimeClassName = &s iNdEx = postIndex + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.EnableServiceLinks = &b + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx 
< postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Overhead[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{}) + if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := 
m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -39518,6 +53073,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39545,7 +53103,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39573,7 +53131,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39583,6 +53141,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39602,7 +53163,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39611,6 +53172,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39633,7 +53197,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39643,6 +53207,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39662,7 +53229,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39672,6 +53239,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39691,7 +53261,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39701,6 +53271,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39720,7 +53293,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39730,6 +53303,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39749,7 +53325,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b 
< 0x80 { break } @@ -39758,11 +53334,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.StartTime = &v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -39782,7 +53361,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39791,6 +53370,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39813,7 +53395,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39823,6 +53405,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39842,7 +53427,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39851,6 +53436,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39873,7 +53461,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39883,11 +53471,82 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIPs = append(m.PodIPs, PodIP{}) + if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 
{ + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{}) + if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -39897,6 +53556,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39924,7 +53586,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39952,7 +53614,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39961,6 +53623,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39982,7 +53647,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39991,6 +53656,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40007,6 +53675,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40034,7 +53705,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40062,7 +53733,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40071,6 +53742,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40092,7 +53766,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40101,6 +53775,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40117,6 +53794,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40144,7 +53824,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -40172,7 +53852,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40181,6 +53861,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40202,7 +53885,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40211,6 +53894,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40228,6 +53914,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40255,7 +53944,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40283,7 +53972,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40292,6 +53981,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40313,7 +54005,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40322,6 +54014,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40338,6 +54033,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40365,7 +54063,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40393,7 +54091,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40403,6 +54101,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40422,7 +54123,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40432,6 +54133,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40451,7 +54155,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40466,6 +54170,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40493,7 +54200,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40521,7 +54228,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40531,6 +54238,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40546,6 +54256,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40573,7 +54286,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40601,7 +54314,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40610,6 +54323,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40631,7 +54347,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40640,6 +54356,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40661,7 +54380,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40671,6 +54390,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40690,7 +54412,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40700,6 +54422,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} if postIndex > l { return io.ErrUnexpectedEOF } @@ -40714,6 +54439,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40741,7 +54469,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40769,7 +54497,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift + m.Weight |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40788,7 +54516,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40797,6 +54525,9 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40813,6 +54544,9 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40840,7 +54574,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40868,7 +54602,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40877,6 +54611,9 @@ func (m *Probe) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40898,7 +54635,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift + m.InitialDelaySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40917,7 +54654,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeoutSeconds |= (int32(b) & 0x7F) << shift + m.TimeoutSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40936,7 +54673,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PeriodSeconds |= (int32(b) & 0x7F) << shift + m.PeriodSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40955,7 +54692,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SuccessThreshold |= (int32(b) & 0x7F) << shift + m.SuccessThreshold |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40974,7 +54711,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FailureThreshold |= (int32(b) & 0x7F) << shift + m.FailureThreshold |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40988,6 +54725,9 @@ func (m *Probe) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41015,7 +54755,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) 
& 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41043,7 +54783,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41052,6 +54792,9 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41074,7 +54817,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41089,6 +54832,9 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41116,7 +54862,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41144,7 +54890,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41154,6 +54900,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41173,7 +54922,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41183,6 +54932,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41202,7 +54954,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41222,7 +54974,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41232,6 +54984,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41251,7 +55006,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41261,11 +55016,46 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -41275,6 +55065,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41302,7 +55095,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41330,7 +55123,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41340,6 +55133,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41359,7 +55155,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41369,6 +55165,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41388,7 +55187,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41398,6 +55197,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41417,7 +55219,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41427,6 +55229,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41446,7 +55251,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41456,6 +55261,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41475,7 +55283,7 @@ func (m *RBDPersistentVolumeSource) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41485,6 +55293,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41504,7 +55315,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41513,6 +55324,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41537,7 +55351,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41552,6 +55366,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41579,7 +55396,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41607,7 +55424,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41617,6 +55434,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41636,7 +55456,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41646,6 +55466,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41665,7 +55488,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41675,6 +55498,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41694,7 +55520,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41704,6 +55530,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41723,7 +55552,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41733,6 +55562,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41752,7 +55584,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41762,6 +55594,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41781,7 +55616,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41790,6 +55625,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41814,7 +55652,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41829,6 +55667,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41856,7 +55697,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41884,7 +55725,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41893,6 +55734,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41914,7 +55758,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41924,6 +55768,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41943,7 +55790,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41952,6 +55799,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41969,6 +55819,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -41996,7 +55849,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42024,7 +55877,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42033,6 +55886,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42054,7 +55910,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42063,6 +55919,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42084,7 +55943,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42093,6 +55952,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42109,6 +55971,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42136,7 +56001,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42164,7 +56029,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42174,6 +56039,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42193,7 +56061,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42203,6 +56071,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42222,7 +56093,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42231,6 +56102,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -42252,7 +56126,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42262,6 +56136,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42281,7 +56158,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42291,6 +56168,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42305,6 +56185,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42332,7 +56215,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42360,7 +56243,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42369,6 +56252,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42390,7 +56276,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42399,6 +56285,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42416,6 +56305,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42443,7 +56335,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42471,7 +56363,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -42491,7 +56383,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42500,54 +56392,20 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } 
- var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -42557,41 +56415,86 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue 
- if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -42607,7 +56510,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42616,6 +56519,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42640,7 +56546,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -42654,6 +56560,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42681,7 +56590,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42709,7 +56618,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -42728,7 +56637,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -42747,7 +56656,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -42766,7 +56675,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -42785,7 +56694,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -42804,7 +56713,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42813,6 +56722,9 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42830,6 +56742,9 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42857,7 +56772,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42885,7 +56800,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42895,6 +56810,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42914,7 +56832,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42924,6 +56842,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42943,7 +56864,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42952,6 +56873,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42968,6 +56892,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42995,7 +56922,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43023,7 +56950,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43032,6 +56959,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43053,7 +56983,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43062,6 +56992,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43083,7 +57016,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43092,6 +57025,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43108,6 +57044,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + 
skippy) > l { return io.ErrUnexpectedEOF } @@ -43135,7 +57074,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43163,7 +57102,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43172,6 +57111,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43193,7 +57135,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43202,6 +57144,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43219,6 +57164,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43246,7 +57194,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43274,7 +57222,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43283,54 +57231,20 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43340,46 +57254,88 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Hard[ResourceName(mapkey)] = mapvalue } + m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43395,7 +57351,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43405,6 +57361,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43424,7 +57383,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43433,6 +57392,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43452,6 +57414,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) 
> l { return io.ErrUnexpectedEOF } @@ -43479,7 +57444,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43507,7 +57472,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43516,54 +57481,20 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43573,46 +57504,88 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
&resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Hard[ResourceName(mapkey)] = mapvalue } + m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43628,7 +57601,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43637,54 +57610,20 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Used == nil { m.Used = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43694,46 +57633,88 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Used[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Used[ResourceName(mapkey)] = mapvalue } + m.Used[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -43744,6 +57725,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43771,7 +57755,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43799,7 +57783,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43808,54 +57792,20 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := 
ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Limits == nil { m.Limits = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43865,46 +57815,88 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Limits[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Limits[ResourceName(mapkey)] = mapvalue } + m.Limits[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43920,7 +57912,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43929,54 +57921,20 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Requests == nil { m.Requests = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43986,46 +57944,88 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err 
:= mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Requests[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Requests[ResourceName(mapkey)] = mapvalue } + m.Requests[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -44036,6 +58036,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44063,7 +58066,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44091,7 +58094,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44101,6 +58104,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44120,7 +58126,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44130,6 +58136,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44149,7 +58158,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44159,6 +58168,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44178,7 +58190,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44188,6 +58200,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44202,6 +58217,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44229,7 +58247,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44257,7 +58275,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44267,6 +58285,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44286,7 +58307,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44296,6 +58317,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44315,7 +58339,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44324,6 +58348,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44348,7 +58375,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44368,7 +58395,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44378,6 +58405,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44397,7 +58427,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44407,6 +58437,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44426,7 +58459,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44436,6 +58469,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44455,7 +58491,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44465,6 +58501,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44484,7 +58523,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44494,6 +58533,9 @@ func (m 
*ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44513,7 +58555,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44528,6 +58570,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44555,7 +58600,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44583,7 +58628,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44593,6 +58638,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44612,7 +58660,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44622,6 +58670,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44641,7 +58692,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44650,6 +58701,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44674,7 +58728,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44694,7 +58748,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44704,6 +58758,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44723,7 +58780,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44733,6 +58790,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44752,7 +58812,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44762,6 +58822,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44781,7 +58844,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44791,6 +58854,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44810,7 +58876,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44820,6 +58886,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44839,7 +58908,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44854,6 +58923,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44881,7 +58953,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44909,7 +58981,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44918,6 +58990,9 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44935,6 +59010,9 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44962,7 +59040,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44990,7 +59068,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45000,6 +59078,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45019,7 +59100,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45029,6 +59110,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45048,7 +59132,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45058,6 +59142,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45072,6 +59159,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45099,7 +59189,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45127,7 +59217,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45136,6 +59226,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45157,7 +59250,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45166,54 +59259,20 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Data == nil { m.Data = make(map[string][]byte) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45223,42 +59282,87 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - m.Data[mapkey] = mapvalue - } else { - var mapvalue []byte - m.Data[mapkey] = mapvalue } + m.Data[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -45274,7 +59378,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45284,6 +59388,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45303,7 +59410,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45312,54 +59419,20 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); 
; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.StringData == nil { m.StringData = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45369,41 +59442,86 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.StringData[mapkey] = mapvalue - } else { - var mapvalue string - m.StringData[mapkey] = mapvalue } + m.StringData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = 
preIndex @@ -45414,6 +59532,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45441,7 +59562,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45469,7 +59590,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45478,6 +59599,9 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45499,7 +59623,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45515,6 +59639,9 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45542,7 +59669,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45570,7 +59697,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45579,6 +59706,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45600,7 +59730,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45610,6 +59740,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45629,7 +59762,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45645,6 +59778,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45672,7 +59808,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45700,7 +59836,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45709,6 +59845,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -45730,7 +59869,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45739,6 +59878,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45756,6 +59898,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45783,7 +59928,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45811,7 +59956,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45820,6 +59965,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45841,7 +59989,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45850,6 +59998,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45872,7 +60023,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45888,6 +60039,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45915,7 +60069,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45943,7 +60097,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45953,6 +60107,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45972,7 +60129,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45982,6 +60139,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45996,6 +60156,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46023,7 +60186,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46051,7 +60214,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46061,6 +60224,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46080,7 +60246,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46089,6 +60255,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46111,7 +60280,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -46131,7 +60300,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46147,6 +60316,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46174,7 +60346,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46202,7 +60374,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46211,6 +60383,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46235,7 +60410,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46256,7 +60431,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46265,6 +60440,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46289,7 +60467,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -46309,7 +60487,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -46330,7 +60508,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46351,7 +60529,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46372,7 +60550,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -46392,7 +60570,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46402,12 +60580,51 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := ProcMountType(dAtA[iNdEx:postIndex]) m.ProcMount = &s iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -46417,6 +60634,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46444,7 +60664,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46472,7 +60692,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46481,6 +60701,9 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46497,6 +60720,9 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46524,7 +60750,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46552,7 +60778,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 
{ break } @@ -46561,6 +60787,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46582,7 +60811,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46591,6 +60820,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46612,7 +60844,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46621,6 +60853,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46637,6 +60872,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46664,7 +60902,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46692,7 +60930,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46701,6 +60939,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46722,7 +60963,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46731,6 +60972,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46753,7 +60997,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46762,6 +61006,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46784,7 +61031,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46800,6 +61047,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46827,7 +61077,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -46855,7 +61105,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46864,6 +61114,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46885,7 +61138,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46894,6 +61147,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46911,6 +61167,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46938,7 +61197,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46966,7 +61225,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46976,6 +61235,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46995,7 +61257,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47015,7 +61277,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47025,6 +61287,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47039,6 +61304,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47066,7 +61334,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47094,7 +61362,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47103,6 +61371,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47124,7 +61395,7 @@ func (m 
*ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47133,6 +61404,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47150,6 +61424,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47177,7 +61454,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47205,7 +61482,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47215,6 +61492,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47234,7 +61514,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47244,6 +61524,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47263,7 +61546,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -47282,7 +61565,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47291,6 +61574,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47312,7 +61598,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NodePort |= (int32(b) & 0x7F) << shift + m.NodePort |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -47326,6 +61612,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47353,7 +61642,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47381,7 +61670,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47391,6 +61680,9 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -47405,6 +61697,9 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47432,7 +61727,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47460,7 +61755,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47469,6 +61764,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47491,7 +61789,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47500,54 +61798,20 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47557,41 +61821,86 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue 
|= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -47607,7 +61916,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47617,6 +61926,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47636,7 +61948,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47646,6 +61958,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47665,7 +61980,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47675,6 +61990,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47694,7 +62012,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47704,6 +62022,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47723,7 +62044,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47733,6 +62054,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47752,7 +62076,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47762,6 +62086,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47781,7 +62108,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47791,6 +62118,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47810,7 +62140,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47820,6 +62150,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47839,7 +62172,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.HealthCheckNodePort |= (int32(b) & 0x7F) << shift + m.HealthCheckNodePort |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -47858,7 +62191,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47878,7 +62211,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47887,6 +62220,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47897,6 +62233,71 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := IPFamily(dAtA[iNdEx:postIndex]) + m.IPFamily = &s + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47906,6 +62307,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47933,7 +62337,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47961,7 +62365,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47970,6 +62374,9 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47986,6 +62393,9 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48013,7 +62423,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48041,7 +62451,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48050,6 +62460,9 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48069,6 +62482,9 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48096,7 +62512,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48124,7 +62540,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48134,6 +62550,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48153,7 +62572,7 @@ func (m 
*StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48163,6 +62582,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48182,7 +62604,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48192,6 +62614,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48211,7 +62636,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48231,7 +62656,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48240,6 +62665,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48259,6 +62687,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48286,7 +62717,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48314,7 +62745,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48324,6 +62755,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48343,7 +62777,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48353,6 +62787,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48372,7 +62809,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48382,6 +62819,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l 
{ return io.ErrUnexpectedEOF } @@ -48401,7 +62841,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48421,7 +62861,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48430,6 +62870,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48449,6 +62892,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48476,7 +62922,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48504,7 +62950,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48514,6 +62960,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48533,7 +62982,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48543,6 +62992,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48557,6 +63009,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48584,7 +63039,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48612,7 +63067,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48621,6 +63076,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48642,7 +63100,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48652,6 +63110,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48666,6 +63127,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48693,7 +63157,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48721,7 +63185,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48731,6 +63195,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48750,7 +63217,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48760,6 +63227,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48779,7 +63249,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48789,6 +63259,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48808,7 +63281,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48817,11 +63290,14 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TimeAdded == nil { - m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.TimeAdded = &v1.Time{} } if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -48836,6 +63312,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48863,7 +63342,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48891,7 +63370,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48901,6 +63380,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48920,7 +63402,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48930,6 +63412,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex 
< 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48949,7 +63434,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48959,6 +63444,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48978,7 +63466,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48988,6 +63476,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49007,7 +63498,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -49022,6 +63513,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49049,7 +63543,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49077,7 +63571,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49087,6 +63581,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49106,7 +63603,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49116,6 +63613,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49130,6 +63630,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49157,7 +63660,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49185,7 +63688,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49194,6 +63697,9 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49211,6 +63717,181 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType) + } + m.MaxSkew = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxSkew |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49238,7 +63919,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49266,7 +63947,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49276,6 +63957,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49296,7 +63980,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49306,6 +63990,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49325,7 +64012,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49335,6 +64022,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49349,6 +64039,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49376,7 +64069,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49404,7 +64097,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49414,6 +64107,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49433,7 +64129,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49442,6 +64138,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49458,6 +64157,9 @@ func (m *Volume) Unmarshal(dAtA 
[]byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49485,7 +64187,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49513,7 +64215,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49523,6 +64225,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49542,7 +64247,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49552,6 +64257,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49566,6 +64274,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49593,7 +64304,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49621,7 +64332,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49631,6 +64342,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49650,7 +64364,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49670,7 +64384,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49680,6 +64394,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49699,7 +64416,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49709,6 +64426,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49728,7 +64448,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-49738,12 +64458,47 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := MountPropagationMode(dAtA[iNdEx:postIndex]) m.MountPropagation = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPathExpr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -49753,6 +64508,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49780,7 +64538,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49808,7 +64566,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49817,6 +64575,9 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49836,6 +64597,9 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49863,7 +64627,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49891,7 +64655,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49900,6 +64664,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49924,7 +64691,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49933,6 +64700,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49957,7 +64727,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -49966,6 +64736,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49990,7 +64763,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49999,6 +64772,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50018,6 +64794,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50045,7 +64824,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50073,7 +64852,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50082,6 +64861,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50106,7 +64888,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50115,6 +64897,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50139,7 +64924,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50148,6 +64933,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50172,7 +64960,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50181,6 +64969,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50205,7 +64996,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50214,6 +65005,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50238,7 +65032,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50247,6 +65041,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50271,7 +65068,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50280,6 +65077,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50304,7 +65104,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50313,6 +65113,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50337,7 +65140,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50346,6 +65149,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50370,7 +65176,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50379,6 +65185,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50403,7 +65212,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50412,6 +65221,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50436,7 +65248,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50445,6 +65257,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50469,7 +65284,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50478,6 +65293,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50502,7 +65320,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50511,6 +65329,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50535,7 +65356,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50544,6 +65365,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50568,7 +65392,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50577,6 +65401,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50601,7 +65428,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50610,6 +65437,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50634,7 +65464,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50643,6 +65473,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50667,7 +65500,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50676,6 +65509,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50700,7 +65536,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50709,6 +65545,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50733,7 +65572,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50742,6 +65581,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50766,7 +65608,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50775,6 +65617,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50799,7 +65644,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50808,6 +65653,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50832,7 +65680,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50841,6 +65689,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50865,7 +65716,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50874,6 +65725,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50898,7 +65752,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50907,6 +65761,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50931,7 +65788,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50940,6 +65797,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50950,6 +65810,42 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &CSIVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -50959,6 +65855,9 @@ func (m *VolumeSource) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50986,7 +65885,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51014,7 +65913,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51024,6 +65923,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51043,7 +65945,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51053,6 +65955,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51072,7 +65977,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51082,6 +65987,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51101,7 +66009,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51111,6 +66019,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51125,6 +66036,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51152,7 +66066,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51180,7 +66094,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift + m.Weight |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -51199,7 +66113,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51208,6 +66122,9 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -51224,6 +66141,161 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsSecurityContextOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsSecurityContextOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpecName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GMSACredentialSpecName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpec", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GMSACredentialSpec = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RunAsUserName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { 
return io.ErrUnexpectedEOF } @@ -51290,10 +66362,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -51322,6 +66397,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -51340,810 +66418,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 12780 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x6c, 0x24, 0x47, - 0x7a, 0xd8, 0xf5, 0xcc, 0x90, 0x9c, 0xf9, 0xf8, 0xae, 0x7d, 0x88, 0x4b, 0x69, 0x77, 0x56, 0xad, - 0xbb, 0xd5, 0xea, 0x24, 0x91, 0xa7, 0x95, 0x74, 0x92, 0x4f, 0x3a, 0xd9, 0x24, 0x87, 0xdc, 0x1d, - 0xed, 0x92, 0x3b, 0xaa, 0xe1, 0xee, 0xde, 0xc9, 0xba, 0xf3, 0x35, 0x67, 0x8a, 0x64, 0x8b, 0xc3, - 0xee, 0x51, 0x77, 0x0f, 0x77, 0xa9, 0xd8, 0x40, 0x72, 0x8e, 0x9d, 0x5c, 0x6c, 0x04, 0x87, 0xd8, - 0xc8, 0xc3, 0x36, 0x1c, 0xc0, 0x71, 0x60, 0x3b, 0x4e, 0x82, 0x38, 0x76, 0x6c, 0xc7, 0x67, 0x27, - 0x8e, 0x9d, 0x1f, 0x0e, 0x10, 0x5c, 0x9c, 0x00, 0xc1, 0x19, 0x30, 0xc2, 0xd8, 0x74, 0x1e, 0xf0, - 0x8f, 0x3c, 0x10, 0xe7, 0x47, 0xcc, 0x18, 0x71, 0x50, 0xcf, 0xae, 0xea, 0xe9, 0x9e, 0x19, 0xae, - 0xb8, 0x94, 0x7c, 0xb8, 0x7f, 0x33, 0xf5, 0x7d, 0xf5, 0x55, 0x75, 0x3d, 0xbf, 0xef, 0xab, 0xef, - 0x01, 0xaf, 0xed, 0xbc, 0x1a, 0xce, 0xb9, 0xfe, 0xfc, 0x4e, 0x67, 0x83, 0x04, 0x1e, 0x89, 0x48, - 0x38, 0xbf, 0x47, 0xbc, 0xa6, 0x1f, 0xcc, 0x0b, 0x80, 0xd3, 0x76, 0xe7, 0x1b, 0x7e, 0x40, 0xe6, - 0xf7, 0x5e, 0x98, 0xdf, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0x34, 0xe7, 0xda, 0x81, 0x1f, 0xf9, 0x08, - 0x71, 0x9c, 0x39, 0xa7, 0xed, 0xce, 0x51, 0x9c, 0xb9, 0xbd, 0x17, 0x66, 0x9f, 0xdf, 0x72, 0xa3, - 0xed, 0xce, 0xc6, 0x5c, 0xc3, 0xdf, 0x9d, 0xdf, 0xf2, 0xb7, 0xfc, 0x79, 0x86, 0xba, 0xd1, 0xd9, - 0x64, 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x93, 0x98, 0x7d, 0x29, 0x6e, 0x66, 0xd7, 0x69, 0x6c, 0xbb, - 0x1e, 0x09, 0xf6, 0xe7, 0xdb, 0x3b, 0x5b, 0xac, 0xdd, 0x80, 0x84, 0x7e, 0x27, 0x68, 0x90, 0x64, - 0xc3, 0x3d, 0x6b, 0x85, 0xf3, 0xbb, 0x24, 0x72, 0x52, 0xba, 0x3b, 0x3b, 0x9f, 0x55, 0x2b, 0xe8, - 0x78, 0x91, 0xbb, 0xdb, 0xdd, 0xcc, 0xa7, 0xfb, 0x55, 0x08, 0x1b, 0xdb, 0x64, 0xd7, 0xe9, 0xaa, - 0xf7, 0x62, 0x56, 0xbd, 0x4e, 0xe4, 0xb6, 0xe6, 0x5d, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, - 0x86, 0x05, 0x97, 0x17, 0xee, 0xd5, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0xc5, 0x96, 0xdf, 0xd8, - 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0xce, 0x2e, 0xa9, 0xb3, 0x81, 0x40, 0xcf, 0x41, 0x71, - 0x8f, 0xfd, 0xaf, 0x56, 0x66, 0xac, 0xcb, 0xd6, 0xd5, 0xd2, 0xe2, 0xd4, 0x6f, 0x1d, 0x94, 0x3f, - 0x76, 0x78, 0x50, 0x2e, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x2b, 0x30, 0xbc, 0x19, 0xae, 0xef, - 0xb7, 0xc9, 0x4c, 0x8e, 0xe1, 0x4e, 0x08, 0xdc, 0xe1, 0x95, 0x3a, 0x2d, 0xc5, 0x02, 0x8a, 0xe6, - 0xa1, 0xd4, 0x76, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0xc9, 0x5f, 0xb6, 0xae, 0x0e, 0x2d, 0x4e, - 0x0b, 0xd4, 0x52, 0x4d, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x79, 0xdb, 0x6b, 0xed, - 0xcf, 0x14, 0x2e, 0x5b, 0x57, 0x8b, 0x71, 0x37, 0xb0, 0x28, 0xc7, 0x0a, 0xc3, 0xfe, 0x91, 0x1c, 
- 0x14, 0x17, 0x36, 0x37, 0x5d, 0xcf, 0x8d, 0xf6, 0xd1, 0x5d, 0x18, 0xf3, 0xfc, 0x26, 0x91, 0xff, - 0xd9, 0x57, 0x8c, 0x5e, 0xbb, 0x3c, 0xd7, 0xbd, 0x94, 0xe6, 0xd6, 0x34, 0xbc, 0xc5, 0xa9, 0xc3, - 0x83, 0xf2, 0x98, 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x68, 0xdb, 0x6f, 0x2a, 0xb2, 0x39, 0x46, - 0xb6, 0x9c, 0x46, 0xb6, 0x16, 0xa3, 0x2d, 0x4e, 0x1e, 0x1e, 0x94, 0x47, 0xb5, 0x02, 0xac, 0x13, - 0x41, 0x1b, 0x30, 0x49, 0xff, 0x7a, 0x91, 0xab, 0xe8, 0xe6, 0x19, 0xdd, 0xa7, 0xb2, 0xe8, 0x6a, - 0xa8, 0x8b, 0x67, 0x0e, 0x0f, 0xca, 0x93, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xbf, 0x0f, 0x13, 0x0b, - 0x51, 0xe4, 0x34, 0xb6, 0x49, 0x93, 0xcf, 0x20, 0x7a, 0x09, 0x0a, 0x9e, 0xb3, 0x4b, 0xc4, 0xfc, - 0x5e, 0x16, 0x03, 0x5b, 0x58, 0x73, 0x76, 0xc9, 0xd1, 0x41, 0x79, 0xea, 0x8e, 0xe7, 0xbe, 0xd7, - 0x11, 0xab, 0x82, 0x96, 0x61, 0x86, 0x8d, 0xae, 0x01, 0x34, 0xc9, 0x9e, 0xdb, 0x20, 0x35, 0x27, - 0xda, 0x16, 0xf3, 0x8d, 0x44, 0x5d, 0xa8, 0x28, 0x08, 0xd6, 0xb0, 0xec, 0x07, 0x50, 0x5a, 0xd8, - 0xf3, 0xdd, 0x66, 0xcd, 0x6f, 0x86, 0x68, 0x07, 0x26, 0xdb, 0x01, 0xd9, 0x24, 0x81, 0x2a, 0x9a, - 0xb1, 0x2e, 0xe7, 0xaf, 0x8e, 0x5e, 0xbb, 0x9a, 0xfa, 0xb1, 0x26, 0xea, 0xb2, 0x17, 0x05, 0xfb, - 0x8b, 0x8f, 0x89, 0xf6, 0x26, 0x13, 0x50, 0x9c, 0xa4, 0x6c, 0xff, 0xcb, 0x1c, 0x9c, 0x5b, 0x78, - 0xbf, 0x13, 0x90, 0x8a, 0x1b, 0xee, 0x24, 0x57, 0x78, 0xd3, 0x0d, 0x77, 0xd6, 0xe2, 0x11, 0x50, - 0x4b, 0xab, 0x22, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc3, 0x08, 0xfd, 0x7d, 0x07, 0x57, 0xc5, 0x27, - 0x9f, 0x11, 0xc8, 0xa3, 0x15, 0x27, 0x72, 0x2a, 0x1c, 0x84, 0x25, 0x0e, 0x5a, 0x85, 0xd1, 0x06, - 0xdb, 0x90, 0x5b, 0xab, 0x7e, 0x93, 0xb0, 0xc9, 0x2c, 0x2d, 0x3e, 0x4b, 0xd1, 0x97, 0xe2, 0xe2, - 0xa3, 0x83, 0xf2, 0x0c, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0x7f, 0x15, - 0x18, 0x25, 0x48, 0xd9, 0x5b, 0x57, 0xb5, 0xad, 0x32, 0xc4, 0xb6, 0xca, 0x58, 0xfa, 0x36, 0x41, - 0x2f, 0x40, 0x61, 0xc7, 0xf5, 0x9a, 0x33, 0xc3, 0x8c, 0xd6, 0x45, 0x3a, 0xe7, 0x37, 0x5d, 0xaf, - 0x79, 0x74, 0x50, 0x9e, 0x36, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0xc8, 0x82, 0x32, 0x83, - 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0xaf, 0x01, 0x84, - 0xa4, 0x11, 0x90, 0x48, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c, 0x7a, 0x20, 0x84, - 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0x75, 0x20, 0xd4, 0x25, 0x00, 0xc7, 0x38, 0xc6, 0x81, - 0x90, 0xef, 0x77, 0x20, 0xa0, 0xcf, 0xc2, 0x64, 0xdc, 0x58, 0xd8, 0x76, 0x1a, 0x72, 0x00, 0xd9, - 0x96, 0xa9, 0x9b, 0x20, 0x9c, 0xc4, 0xb5, 0xff, 0x9e, 0x25, 0x16, 0x0f, 0xfd, 0xea, 0x8f, 0xf8, - 0xb7, 0xda, 0xbf, 0x6c, 0xc1, 0xc8, 0xa2, 0xeb, 0x35, 0x5d, 0x6f, 0x0b, 0x7d, 0x09, 0x8a, 0xf4, - 0x6e, 0x6a, 0x3a, 0x91, 0x23, 0xce, 0xbd, 0x4f, 0x69, 0x7b, 0x4b, 0x5d, 0x15, 0x73, 0xed, 0x9d, - 0x2d, 0x5a, 0x10, 0xce, 0x51, 0x6c, 0xba, 0xdb, 0x6e, 0x6f, 0xbc, 0x4b, 0x1a, 0xd1, 0x2a, 0x89, - 0x9c, 0xf8, 0x73, 0xe2, 0x32, 0xac, 0xa8, 0xa2, 0x9b, 0x30, 0x1c, 0x39, 0xc1, 0x16, 0x89, 0xc4, - 0x01, 0x98, 0x7a, 0x50, 0xf1, 0x9a, 0x98, 0xee, 0x48, 0xe2, 0x35, 0x48, 0x7c, 0x2d, 0xac, 0xb3, - 0xaa, 0x58, 0x90, 0xb0, 0xff, 0xca, 0x30, 0x5c, 0x58, 0xaa, 0x57, 0x33, 0xd6, 0xd5, 0x15, 0x18, - 0x6e, 0x06, 0xee, 0x1e, 0x09, 0xc4, 0x38, 0x2b, 0x2a, 0x15, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x0a, - 0x63, 0xfc, 0x42, 0xba, 0xe1, 0x78, 0xcd, 0x96, 0x1c, 0xe2, 0xb3, 0x02, 0x7b, 0xec, 0xae, 0x06, - 0xc3, 0x06, 0xe6, 0x31, 0x17, 0xd5, 0x95, 0xc4, 0x66, 0xcc, 0xba, 0xec, 0xbe, 0x62, 0xc1, 0x14, - 0x6f, 0x66, 0x21, 0x8a, 0x02, 0x77, 0xa3, 0x13, 0x91, 0x70, 0x66, 0x88, 0x9d, 0x74, 0x4b, 0x69, - 0xa3, 0x95, 0x39, 0x02, 
0x73, 0x77, 0x13, 0x54, 0xf8, 0x21, 0x38, 0x23, 0xda, 0x9d, 0x4a, 0x82, - 0x71, 0x57, 0xb3, 0xe8, 0x7b, 0x2d, 0x98, 0x6d, 0xf8, 0x5e, 0x14, 0xf8, 0xad, 0x16, 0x09, 0x6a, - 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x32, 0xe6, 0x50, 0x21, - 0x89, 0x39, 0xbc, 0x74, 0x78, 0x50, 0x9e, 0x5d, 0xca, 0x24, 0x85, 0x7b, 0x34, 0x83, 0x76, 0x00, - 0xd1, 0xab, 0xb4, 0x1e, 0x39, 0x5b, 0x24, 0x6e, 0x7c, 0x64, 0xf0, 0xc6, 0xcf, 0x1f, 0x1e, 0x94, - 0xd1, 0x5a, 0x17, 0x09, 0x9c, 0x42, 0x16, 0xbd, 0x07, 0x67, 0x69, 0x69, 0xd7, 0xb7, 0x16, 0x07, - 0x6f, 0x6e, 0xe6, 0xf0, 0xa0, 0x7c, 0x76, 0x2d, 0x85, 0x08, 0x4e, 0x25, 0x3d, 0xbb, 0x04, 0xe7, - 0x52, 0xa7, 0x0a, 0x4d, 0x41, 0x7e, 0x87, 0x70, 0x16, 0xa4, 0x84, 0xe9, 0x4f, 0x74, 0x16, 0x86, - 0xf6, 0x9c, 0x56, 0x47, 0xac, 0x52, 0xcc, 0xff, 0x7c, 0x26, 0xf7, 0xaa, 0x65, 0x37, 0x60, 0x6c, - 0xc9, 0x69, 0x3b, 0x1b, 0x6e, 0xcb, 0x8d, 0x5c, 0x12, 0xa2, 0xa7, 0x21, 0xef, 0x34, 0x9b, 0xec, - 0x8a, 0x2c, 0x2d, 0x9e, 0x3b, 0x3c, 0x28, 0xe7, 0x17, 0x9a, 0xf4, 0xac, 0x06, 0x85, 0xb5, 0x8f, - 0x29, 0x06, 0xfa, 0x24, 0x14, 0x9a, 0x81, 0xdf, 0x9e, 0xc9, 0x31, 0x4c, 0x3a, 0x54, 0x85, 0x4a, - 0xe0, 0xb7, 0x13, 0xa8, 0x0c, 0xc7, 0xfe, 0xf5, 0x1c, 0x3c, 0xb1, 0x44, 0xda, 0xdb, 0x2b, 0xf5, - 0x8c, 0x4d, 0x77, 0x15, 0x8a, 0xbb, 0xbe, 0xe7, 0x46, 0x7e, 0x10, 0x8a, 0xa6, 0xd9, 0x6d, 0xb2, - 0x2a, 0xca, 0xb0, 0x82, 0xa2, 0xcb, 0x50, 0x68, 0xc7, 0x9c, 0xc0, 0x98, 0xe4, 0x22, 0x18, 0x0f, - 0xc0, 0x20, 0x14, 0xa3, 0x13, 0x92, 0x40, 0xdc, 0x82, 0x0a, 0xe3, 0x4e, 0x48, 0x02, 0xcc, 0x20, - 0xf1, 0x71, 0x4a, 0x0f, 0x5a, 0xb1, 0xad, 0x12, 0xc7, 0x29, 0x85, 0x60, 0x0d, 0x0b, 0xd5, 0xa0, - 0x14, 0xaa, 0x49, 0x1d, 0x1a, 0x7c, 0x52, 0xc7, 0xd9, 0x79, 0xab, 0x66, 0x32, 0x26, 0x62, 0x1c, - 0x03, 0xc3, 0x7d, 0xcf, 0xdb, 0xaf, 0xe5, 0x00, 0xf1, 0x21, 0xfc, 0x33, 0x36, 0x70, 0x77, 0xba, - 0x07, 0x2e, 0x95, 0xf3, 0xba, 0xe5, 0x37, 0x9c, 0x56, 0xf2, 0x08, 0x3f, 0xa9, 0xd1, 0xfb, 0xdf, - 0x16, 0x3c, 0xb1, 0xe4, 0x7a, 0x4d, 0x12, 0x64, 0x2c, 0xc0, 0x47, 0x23, 0x80, 0x1c, 0xef, 0xa4, - 0x37, 0x96, 0x58, 0xe1, 0x04, 0x96, 0x98, 0xfd, 0x3f, 0x2c, 0x40, 0xfc, 0xb3, 0x3f, 0x72, 0x1f, - 0x7b, 0xa7, 0xfb, 0x63, 0x4f, 0x60, 0x59, 0xd8, 0xb7, 0x60, 0x62, 0xa9, 0xe5, 0x12, 0x2f, 0xaa, - 0xd6, 0x96, 0x7c, 0x6f, 0xd3, 0xdd, 0x42, 0x9f, 0x81, 0x09, 0x2a, 0xd3, 0xfa, 0x9d, 0xa8, 0x4e, - 0x1a, 0xbe, 0xc7, 0xd8, 0x7f, 0x2a, 0x09, 0xa2, 0xc3, 0x83, 0xf2, 0xc4, 0xba, 0x01, 0xc1, 0x09, - 0x4c, 0xfb, 0x77, 0xe9, 0xf8, 0xf9, 0xbb, 0x6d, 0xdf, 0x23, 0x5e, 0xb4, 0xe4, 0x7b, 0x4d, 0x2e, - 0x26, 0x7e, 0x06, 0x0a, 0x11, 0x1d, 0x0f, 0x3e, 0x76, 0x57, 0xe4, 0x46, 0xa1, 0xa3, 0x70, 0x74, - 0x50, 0x3e, 0xdf, 0x5d, 0x83, 0x8d, 0x13, 0xab, 0x83, 0xbe, 0x0d, 0x86, 0xc3, 0xc8, 0x89, 0x3a, - 0xa1, 0x18, 0xcd, 0x27, 0xe5, 0x68, 0xd6, 0x59, 0xe9, 0xd1, 0x41, 0x79, 0x52, 0x55, 0xe3, 0x45, - 0x58, 0x54, 0x40, 0xcf, 0xc0, 0xc8, 0x2e, 0x09, 0x43, 0x67, 0x4b, 0x72, 0xf8, 0x93, 0xa2, 0xee, - 0xc8, 0x2a, 0x2f, 0xc6, 0x12, 0x8e, 0x9e, 0x82, 0x21, 0x12, 0x04, 0x7e, 0x20, 0xf6, 0xe8, 0xb8, - 0x40, 0x1c, 0x5a, 0xa6, 0x85, 0x98, 0xc3, 0xec, 0x7f, 0x63, 0xc1, 0xa4, 0xea, 0x2b, 0x6f, 0xeb, - 0x14, 0x58, 0xb9, 0xb7, 0x01, 0x1a, 0xf2, 0x03, 0x43, 0x76, 0x7b, 0x8c, 0x5e, 0xbb, 0x92, 0xca, - 0xa0, 0x74, 0x0d, 0x63, 0x4c, 0x59, 0x15, 0x85, 0x58, 0xa3, 0x66, 0xff, 0x9a, 0x05, 0x67, 0x12, - 0x5f, 0x74, 0xcb, 0x0d, 0x23, 0xf4, 0x4e, 0xd7, 0x57, 0xcd, 0x0d, 0xf6, 0x55, 0xb4, 0x36, 0xfb, - 0x26, 0xb5, 0x94, 0x65, 0x89, 0xf6, 0x45, 0x37, 0x60, 0xc8, 0x8d, 0xc8, 0xae, 0xfc, 0x98, 0xa7, - 0x7a, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0x55, 
0x5a, 0x13, 0x73, 0x02, 0xf6, 0x0f, 0xe5, 0xa1, - 0xc4, 0x97, 0xed, 0xaa, 0xd3, 0x3e, 0x85, 0xb9, 0xa8, 0x42, 0x81, 0x51, 0xe7, 0x1d, 0x7f, 0x3a, - 0xbd, 0xe3, 0xa2, 0x3b, 0x73, 0x54, 0x4e, 0xe3, 0xac, 0xa0, 0xba, 0x1a, 0x68, 0x11, 0x66, 0x24, - 0x90, 0x03, 0xb0, 0xe1, 0x7a, 0x4e, 0xb0, 0x4f, 0xcb, 0x66, 0xf2, 0x8c, 0xe0, 0xf3, 0xbd, 0x09, - 0x2e, 0x2a, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x06, 0x60, 0x8d, 0xe8, 0xec, 0x2b, 0x50, 0x52, 0xc8, - 0xc7, 0xe1, 0x71, 0x66, 0x3f, 0x0b, 0x93, 0x89, 0xb6, 0xfa, 0x55, 0x1f, 0xd3, 0x59, 0xa4, 0x5f, - 0x61, 0xa7, 0x80, 0xe8, 0xf5, 0xb2, 0xb7, 0x27, 0x4e, 0xd1, 0xf7, 0xe1, 0x6c, 0x2b, 0xe5, 0x70, - 0x12, 0x53, 0x35, 0xf8, 0x61, 0xf6, 0x84, 0xf8, 0xec, 0xb3, 0x69, 0x50, 0x9c, 0xda, 0x06, 0xbd, - 0xf6, 0xfd, 0x36, 0x5d, 0xf3, 0x4e, 0x8b, 0xf5, 0x57, 0x48, 0xdf, 0xb7, 0x45, 0x19, 0x56, 0x50, - 0x7a, 0x84, 0x9d, 0x55, 0x9d, 0xbf, 0x49, 0xf6, 0xeb, 0xa4, 0x45, 0x1a, 0x91, 0x1f, 0x7c, 0xa8, - 0xdd, 0xbf, 0xc8, 0x47, 0x9f, 0x9f, 0x80, 0xa3, 0x82, 0x40, 0xfe, 0x26, 0xd9, 0xe7, 0x53, 0xa1, - 0x7f, 0x5d, 0xbe, 0xe7, 0xd7, 0xfd, 0x9c, 0x05, 0xe3, 0xea, 0xeb, 0x4e, 0x61, 0xab, 0x2f, 0x9a, - 0x5b, 0xfd, 0x62, 0xcf, 0x05, 0x9e, 0xb1, 0xc9, 0xbf, 0x96, 0x83, 0x0b, 0x0a, 0x87, 0xb2, 0xfb, - 0xfc, 0x8f, 0x58, 0x55, 0xf3, 0x50, 0xf2, 0x94, 0xf6, 0xc0, 0x32, 0xc5, 0xf6, 0x58, 0x77, 0x10, - 0xe3, 0x50, 0xae, 0xcd, 0x8b, 0x45, 0xfc, 0x31, 0x5d, 0xad, 0x26, 0x54, 0x68, 0x8b, 0x90, 0xef, - 0xb8, 0x4d, 0x71, 0x67, 0x7c, 0x4a, 0x8e, 0xf6, 0x9d, 0x6a, 0xe5, 0xe8, 0xa0, 0xfc, 0x64, 0x96, - 0x4a, 0x97, 0x5e, 0x56, 0xe1, 0xdc, 0x9d, 0x6a, 0x05, 0xd3, 0xca, 0x68, 0x01, 0x26, 0xa5, 0xd6, - 0xfa, 0x2e, 0xe5, 0xa0, 0x7c, 0x4f, 0x5c, 0x2d, 0x4a, 0x37, 0x86, 0x4d, 0x30, 0x4e, 0xe2, 0xa3, - 0x0a, 0x4c, 0xed, 0x74, 0x36, 0x48, 0x8b, 0x44, 0xfc, 0x83, 0x6f, 0x12, 0xae, 0x39, 0x2a, 0xc5, - 0xa2, 0xe5, 0xcd, 0x04, 0x1c, 0x77, 0xd5, 0xb0, 0xff, 0x94, 0x1d, 0xf1, 0x62, 0xf4, 0x6a, 0x81, - 0x4f, 0x17, 0x16, 0xa5, 0xfe, 0x61, 0x2e, 0xe7, 0x41, 0x56, 0xc5, 0x4d, 0xb2, 0xbf, 0xee, 0x53, - 0x66, 0x3b, 0x7d, 0x55, 0x18, 0x6b, 0xbe, 0xd0, 0x73, 0xcd, 0xff, 0x42, 0x0e, 0xce, 0xa9, 0x11, - 0x30, 0xf8, 0xba, 0x3f, 0xeb, 0x63, 0xf0, 0x02, 0x8c, 0x36, 0xc9, 0xa6, 0xd3, 0x69, 0x45, 0x4a, - 0x8d, 0x39, 0xc4, 0x55, 0xd9, 0x95, 0xb8, 0x18, 0xeb, 0x38, 0xc7, 0x18, 0xb6, 0x9f, 0x1c, 0x65, - 0x77, 0x6b, 0xe4, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xb9, 0x6b, 0x9e, 0x82, 0x21, 0x77, 0x97, - 0xf2, 0x5a, 0x39, 0x93, 0x85, 0xaa, 0xd2, 0x42, 0xcc, 0x61, 0xe8, 0x13, 0x30, 0xd2, 0xf0, 0x77, - 0x77, 0x1d, 0xaf, 0xc9, 0xae, 0xbc, 0xd2, 0xe2, 0x28, 0x65, 0xc7, 0x96, 0x78, 0x11, 0x96, 0x30, - 0xf4, 0x04, 0x14, 0x9c, 0x60, 0x2b, 0x9c, 0x29, 0x30, 0x9c, 0x22, 0x6d, 0x69, 0x21, 0xd8, 0x0a, - 0x31, 0x2b, 0xa5, 0x52, 0xd5, 0x7d, 0x3f, 0xd8, 0x71, 0xbd, 0xad, 0x8a, 0x1b, 0x88, 0x2d, 0xa1, - 0xee, 0xc2, 0x7b, 0x0a, 0x82, 0x35, 0x2c, 0xb4, 0x02, 0x43, 0x6d, 0x3f, 0x88, 0xc2, 0x99, 0x61, - 0x36, 0xdc, 0x4f, 0x66, 0x1c, 0x44, 0xfc, 0x6b, 0x6b, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff, 0x85, - 0x98, 0x57, 0x47, 0xdf, 0x06, 0x79, 0xe2, 0xed, 0xcd, 0x8c, 0x30, 0x2a, 0xb3, 0x69, 0x54, 0x96, - 0xbd, 0xbd, 0xbb, 0x4e, 0x10, 0x9f, 0xd2, 0xcb, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0xe7, 0xa1, 0x24, - 0xb7, 0x78, 0x28, 0xd4, 0x1c, 0xa9, 0x4b, 0x4c, 0x1e, 0x0c, 0x98, 0xbc, 0xd7, 0x71, 0x03, 0xb2, - 0x4b, 0xbc, 0x28, 0x8c, 0xcf, 0x34, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x5e, 0xea, 0xd6, 0x56, - 0xfd, 0x8e, 0x17, 0x85, 0x33, 0x25, 0xd6, 0xbd, 0xd4, 0x57, 0x8f, 0xbb, 0x31, 0x5e, 0x52, 0xf9, - 0xc6, 0x2b, 0x63, 0x83, 0x14, 0xc2, 0x30, 0xde, 0x72, 0xf7, 0x88, 0x47, 
0xc2, 0xb0, 0x16, 0xf8, - 0x1b, 0x64, 0x06, 0x58, 0xcf, 0x2f, 0xa4, 0x3f, 0x06, 0xf8, 0x1b, 0x64, 0x71, 0xfa, 0xf0, 0xa0, - 0x3c, 0x7e, 0x4b, 0xaf, 0x83, 0x4d, 0x12, 0xe8, 0x0e, 0x4c, 0x50, 0xb9, 0xc6, 0x8d, 0x89, 0x8e, - 0xf6, 0x23, 0xca, 0xa4, 0x0f, 0x6c, 0x54, 0xc2, 0x09, 0x22, 0xe8, 0x4d, 0x28, 0xb5, 0xdc, 0x4d, - 0xd2, 0xd8, 0x6f, 0xb4, 0xc8, 0xcc, 0x18, 0xa3, 0x98, 0xba, 0xad, 0x6e, 0x49, 0x24, 0x2e, 0x17, - 0xa9, 0xbf, 0x38, 0xae, 0x8e, 0xee, 0xc2, 0xf9, 0x88, 0x04, 0xbb, 0xae, 0xe7, 0xd0, 0xed, 0x20, - 0xe4, 0x05, 0xf6, 0xa4, 0x32, 0xce, 0xd6, 0xdb, 0x25, 0x31, 0x74, 0xe7, 0xd7, 0x53, 0xb1, 0x70, - 0x46, 0x6d, 0x74, 0x1b, 0x26, 0xd9, 0x4e, 0xa8, 0x75, 0x5a, 0xad, 0x9a, 0xdf, 0x72, 0x1b, 0xfb, - 0x33, 0x13, 0x8c, 0xe0, 0x27, 0xe4, 0xbd, 0x50, 0x35, 0xc1, 0x47, 0x07, 0x65, 0x88, 0xff, 0xe1, - 0x64, 0x6d, 0xb4, 0xc1, 0x74, 0xe8, 0x9d, 0xc0, 0x8d, 0xf6, 0xe9, 0xfa, 0x25, 0x0f, 0xa2, 0x99, - 0xc9, 0x9e, 0xa2, 0xb0, 0x8e, 0xaa, 0x14, 0xed, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x5b, 0x3b, 0x8c, - 0x9a, 0xae, 0x37, 0x33, 0xc5, 0x4e, 0x0c, 0xb5, 0x33, 0xea, 0xb4, 0x10, 0x73, 0x18, 0xd3, 0x9f, - 0xd3, 0x1f, 0xb7, 0xe9, 0x09, 0x3a, 0xcd, 0x10, 0x63, 0xfd, 0xb9, 0x04, 0xe0, 0x18, 0x87, 0x32, - 0x35, 0x51, 0xb4, 0x3f, 0x83, 0x18, 0xaa, 0xda, 0x2e, 0xeb, 0xeb, 0x9f, 0xc7, 0xb4, 0x1c, 0xdd, - 0x82, 0x11, 0xe2, 0xed, 0xad, 0x04, 0xfe, 0xee, 0xcc, 0x99, 0xec, 0x3d, 0xbb, 0xcc, 0x51, 0xf8, - 0x81, 0x1e, 0x0b, 0x78, 0xa2, 0x18, 0x4b, 0x12, 0xe8, 0x01, 0xcc, 0xa4, 0xcc, 0x08, 0x9f, 0x80, - 0xb3, 0x6c, 0x02, 0x5e, 0x17, 0x75, 0x67, 0xd6, 0x33, 0xf0, 0x8e, 0x7a, 0xc0, 0x70, 0x26, 0x75, - 0xf4, 0x05, 0x18, 0xe7, 0x1b, 0x8a, 0x3f, 0xbe, 0x85, 0x33, 0xe7, 0xd8, 0xd7, 0x5c, 0xce, 0xde, - 0x9c, 0x1c, 0x71, 0xf1, 0x9c, 0xe8, 0xd0, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x9a, 0xbd, 0x01, 0x13, - 0xea, 0xdc, 0x62, 0x4b, 0x07, 0x95, 0x61, 0x88, 0x71, 0x3b, 0x42, 0xbf, 0x55, 0xa2, 0x33, 0xc5, - 0x38, 0x21, 0xcc, 0xcb, 0xd9, 0x4c, 0xb9, 0xef, 0x93, 0xc5, 0xfd, 0x88, 0x70, 0xa9, 0x3a, 0xaf, - 0xcd, 0x94, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0x7f, 0x9c, 0x6b, 0x8c, 0x0f, 0xc7, 0x01, 0xae, 0x83, - 0xe7, 0xa0, 0xb8, 0xed, 0x87, 0x11, 0xc5, 0x66, 0x6d, 0x0c, 0xc5, 0x7c, 0xe2, 0x0d, 0x51, 0x8e, - 0x15, 0x06, 0x7a, 0x0d, 0xc6, 0x1b, 0x7a, 0x03, 0xe2, 0x2e, 0x53, 0x43, 0x60, 0xb4, 0x8e, 0x4d, - 0x5c, 0xf4, 0x2a, 0x14, 0xd9, 0xd3, 0x79, 0xc3, 0x6f, 0x09, 0x26, 0x4b, 0x5e, 0xc8, 0xc5, 0x9a, - 0x28, 0x3f, 0xd2, 0x7e, 0x63, 0x85, 0x8d, 0xae, 0xc0, 0x30, 0xed, 0x42, 0xb5, 0x26, 0x6e, 0x11, - 0xa5, 0xaa, 0xb9, 0xc1, 0x4a, 0xb1, 0x80, 0xda, 0x7f, 0x2d, 0xa7, 0x8d, 0x32, 0x95, 0x48, 0x09, - 0xaa, 0xc1, 0xc8, 0x7d, 0xc7, 0x8d, 0x5c, 0x6f, 0x4b, 0xb0, 0x0b, 0xcf, 0xf4, 0xbc, 0x52, 0x58, - 0xa5, 0x7b, 0xbc, 0x02, 0xbf, 0xf4, 0xc4, 0x1f, 0x2c, 0xc9, 0x50, 0x8a, 0x41, 0xc7, 0xf3, 0x28, - 0xc5, 0xdc, 0xa0, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, 0x3f, 0x58, 0x92, 0x41, 0xef, 0x00, 0xc8, - 0x65, 0x49, 0x9a, 0xe2, 0xc9, 0xfa, 0xb9, 0xfe, 0x44, 0xd7, 0x55, 0x9d, 0xc5, 0x09, 0x7a, 0xa5, - 0xc6, 0xff, 0xb1, 0x46, 0xcf, 0x8e, 0x18, 0x5b, 0xd5, 0xdd, 0x19, 0xf4, 0x9d, 0xf4, 0x24, 0x70, - 0x82, 0x88, 0x34, 0x17, 0x22, 0x31, 0x38, 0x9f, 0x1c, 0x4c, 0xa6, 0x58, 0x77, 0x77, 0x89, 0x7e, - 0x6a, 0x08, 0x22, 0x38, 0xa6, 0x67, 0xff, 0x52, 0x1e, 0x66, 0xb2, 0xba, 0x4b, 0x17, 0x1d, 0x79, - 0xe0, 0x46, 0x4b, 0x94, 0x1b, 0xb2, 0xcc, 0x45, 0xb7, 0x2c, 0xca, 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, - 0xba, 0x5b, 0x52, 0x24, 0x1c, 0x8a, 0x67, 0xbf, 0xce, 0x4a, 0xb1, 0x80, 0x52, 0xbc, 0x80, 0x38, - 0xa1, 0xb0, 0x89, 0xd0, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, 0x7d, 0x53, 0xa1, 0x8f, 0xbe, - 
0xc9, 0x18, 0xa2, 0xa1, 0x93, 0x1d, 0x22, 0xf4, 0x45, 0x80, 0x4d, 0xd7, 0x73, 0xc3, 0x6d, 0x46, - 0x7d, 0xf8, 0xd8, 0xd4, 0x15, 0x2f, 0xb5, 0xa2, 0xa8, 0x60, 0x8d, 0x22, 0x7a, 0x19, 0x46, 0xd5, - 0x06, 0xac, 0x56, 0xd8, 0x03, 0x91, 0xf6, 0xe0, 0x1e, 0x9f, 0x46, 0x15, 0xac, 0xe3, 0xd9, 0xef, - 0x26, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0x83, 0x8e, 0x6f, 0xae, 0xf7, 0xf8, 0xda, 0xbf, - 0x91, 0x87, 0x49, 0xa3, 0xb1, 0x4e, 0x38, 0xc0, 0x99, 0x75, 0x9d, 0xde, 0x73, 0x4e, 0x44, 0xc4, - 0xfe, 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x2e, 0xa4, 0x3b, 0x80, 0xd7, 0x47, 0x5f, 0x84, 0x52, 0xcb, - 0x09, 0x99, 0xee, 0x8a, 0x88, 0x7d, 0x37, 0x08, 0xb1, 0x58, 0x8e, 0x70, 0xc2, 0x48, 0xbb, 0x6a, - 0x38, 0xed, 0x98, 0x24, 0xbd, 0x90, 0x29, 0xef, 0x23, 0x8d, 0x6e, 0x54, 0x27, 0x28, 0x83, 0xb4, - 0x8f, 0x39, 0x0c, 0xbd, 0x0a, 0x63, 0x01, 0x61, 0xab, 0x62, 0x89, 0xb2, 0x72, 0x6c, 0x99, 0x0d, - 0xc5, 0x3c, 0x1f, 0xd6, 0x60, 0xd8, 0xc0, 0x8c, 0x59, 0xf9, 0xe1, 0x1e, 0xac, 0xfc, 0x33, 0x30, - 0xc2, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0xa3, 0xca, 0x8b, 0xb1, 0x84, 0x27, 0x17, 0x4c, 0x71, 0xc0, - 0x05, 0xf3, 0x49, 0x98, 0xa8, 0x38, 0x64, 0xd7, 0xf7, 0x96, 0xbd, 0x66, 0xdb, 0x77, 0xbd, 0x08, - 0xcd, 0x40, 0x81, 0xdd, 0x0e, 0x7c, 0x6f, 0x17, 0x28, 0x05, 0x5c, 0xa0, 0x8c, 0xb9, 0xbd, 0x05, - 0xe7, 0x2a, 0xfe, 0x7d, 0xef, 0xbe, 0x13, 0x34, 0x17, 0x6a, 0x55, 0x4d, 0xce, 0x5d, 0x93, 0x72, - 0x16, 0x37, 0x62, 0x49, 0x3d, 0x53, 0xb5, 0x9a, 0xfc, 0xae, 0x5d, 0x71, 0x5b, 0x24, 0x43, 0x1b, - 0xf1, 0x37, 0x72, 0x46, 0x4b, 0x31, 0xbe, 0x7a, 0x30, 0xb2, 0x32, 0x1f, 0x8c, 0xde, 0x82, 0xe2, - 0xa6, 0x4b, 0x5a, 0x4d, 0x4c, 0x36, 0xc5, 0x12, 0x7b, 0x3a, 0xfb, 0x5d, 0x7e, 0x85, 0x62, 0x4a, - 0xed, 0x13, 0x97, 0xd2, 0x56, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x1d, 0x98, 0x92, 0x62, 0x80, 0x84, - 0x8a, 0x05, 0xf7, 0x4c, 0x2f, 0xd9, 0xc2, 0x24, 0x7e, 0xf6, 0xf0, 0xa0, 0x3c, 0x85, 0x13, 0x64, - 0x70, 0x17, 0x61, 0x2a, 0x96, 0xed, 0xd2, 0xa3, 0xb5, 0xc0, 0x86, 0x9f, 0x89, 0x65, 0x4c, 0xc2, - 0x64, 0xa5, 0xf6, 0x8f, 0x59, 0xf0, 0x58, 0xd7, 0xc8, 0x08, 0x49, 0xfb, 0x84, 0x67, 0x21, 0x29, - 0xf9, 0xe6, 0xfa, 0x4b, 0xbe, 0xf6, 0xdf, 0xb7, 0xe0, 0xec, 0xf2, 0x6e, 0x3b, 0xda, 0xaf, 0xb8, - 0xe6, 0xeb, 0xce, 0x2b, 0x30, 0xbc, 0x4b, 0x9a, 0x6e, 0x67, 0x57, 0xcc, 0x5c, 0x59, 0x1e, 0x3f, - 0xab, 0xac, 0xf4, 0xe8, 0xa0, 0x3c, 0x5e, 0x8f, 0xfc, 0xc0, 0xd9, 0x22, 0xbc, 0x00, 0x0b, 0x74, - 0x76, 0x88, 0xbb, 0xef, 0x93, 0x5b, 0xee, 0xae, 0x2b, 0xed, 0x2c, 0x7a, 0xea, 0xce, 0xe6, 0xe4, - 0x80, 0xce, 0xbd, 0xd5, 0x71, 0xbc, 0xc8, 0x8d, 0xf6, 0xc5, 0xc3, 0x8c, 0x24, 0x82, 0x63, 0x7a, - 0xf6, 0x37, 0x2c, 0x98, 0x94, 0xeb, 0x7e, 0xa1, 0xd9, 0x0c, 0x48, 0x18, 0xa2, 0x59, 0xc8, 0xb9, - 0x6d, 0xd1, 0x4b, 0x10, 0xbd, 0xcc, 0x55, 0x6b, 0x38, 0xe7, 0xb6, 0x51, 0x0d, 0x4a, 0xdc, 0x5c, - 0x23, 0x5e, 0x5c, 0x03, 0x19, 0x7d, 0xb0, 0x1e, 0xac, 0xcb, 0x9a, 0x38, 0x26, 0x22, 0x39, 0x38, - 0x76, 0x66, 0xe6, 0xcd, 0x57, 0xaf, 0x1b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x15, 0x8a, 0x9e, 0xdf, - 0xe4, 0xd6, 0x33, 0xfc, 0xf6, 0x63, 0x4b, 0x76, 0x4d, 0x94, 0x61, 0x05, 0xb5, 0x7f, 0xd0, 0x82, - 0x31, 0xf9, 0x65, 0x03, 0x32, 0x93, 0x74, 0x6b, 0xc5, 0x8c, 0x64, 0xbc, 0xb5, 0x28, 0x33, 0xc8, - 0x20, 0x06, 0x0f, 0x98, 0x3f, 0x0e, 0x0f, 0x68, 0xff, 0x68, 0x0e, 0x26, 0x64, 0x77, 0xea, 0x9d, - 0x8d, 0x90, 0x44, 0x68, 0x1d, 0x4a, 0x0e, 0x1f, 0x72, 0x22, 0x57, 0xec, 0x53, 0xe9, 0xc2, 0x87, - 0x31, 0x3f, 0xf1, 0xb5, 0xbc, 0x20, 0x6b, 0xe3, 0x98, 0x10, 0x6a, 0xc1, 0xb4, 0xe7, 0x47, 0xec, - 0x88, 0x56, 0xf0, 0x5e, 0x4f, 0x20, 0x49, 0xea, 0x17, 0x04, 0xf5, 0xe9, 0xb5, 0x24, 0x15, 0xdc, - 0x4d, 0x18, 0x2d, 0x4b, 
0x85, 0x47, 0x3e, 0x5b, 0xdc, 0xd0, 0x67, 0x21, 0x5d, 0xdf, 0x61, 0xff, - 0xaa, 0x05, 0x25, 0x89, 0x76, 0x1a, 0xaf, 0x5d, 0xab, 0x30, 0x12, 0xb2, 0x49, 0x90, 0x43, 0x63, - 0xf7, 0xea, 0x38, 0x9f, 0xaf, 0xf8, 0xe6, 0xe1, 0xff, 0x43, 0x2c, 0x69, 0x30, 0x7d, 0xb7, 0xea, - 0xfe, 0x47, 0x44, 0xdf, 0xad, 0xfa, 0x93, 0x71, 0xc3, 0xfc, 0x57, 0xd6, 0x67, 0x4d, 0xac, 0xa5, - 0x0c, 0x52, 0x3b, 0x20, 0x9b, 0xee, 0x83, 0x24, 0x83, 0x54, 0x63, 0xa5, 0x58, 0x40, 0xd1, 0x3b, - 0x30, 0xd6, 0x90, 0x8a, 0xce, 0xf8, 0x18, 0xb8, 0xd2, 0x53, 0xe9, 0xae, 0xde, 0x67, 0xb8, 0x65, - 0xed, 0x92, 0x56, 0x1f, 0x1b, 0xd4, 0xcc, 0xe7, 0xf6, 0x7c, 0xbf, 0xe7, 0xf6, 0x98, 0x6e, 0xf6, - 0xe3, 0xf3, 0x8f, 0x5b, 0x30, 0xcc, 0xd5, 0x65, 0x83, 0xe9, 0x17, 0xb5, 0xe7, 0xaa, 0x78, 0xec, - 0xee, 0xd2, 0x42, 0xf1, 0xfc, 0x84, 0x56, 0xa1, 0xc4, 0x7e, 0x30, 0xb5, 0x41, 0x3e, 0xdb, 0xa4, - 0x98, 0xb7, 0xaa, 0x77, 0xf0, 0xae, 0xac, 0x86, 0x63, 0x0a, 0xf6, 0x0f, 0xe7, 0xe9, 0x51, 0x15, - 0xa3, 0x1a, 0x37, 0xb8, 0xf5, 0xe8, 0x6e, 0xf0, 0xdc, 0xa3, 0xba, 0xc1, 0xb7, 0x60, 0xb2, 0xa1, - 0x3d, 0x6e, 0xc5, 0x33, 0x79, 0xb5, 0xe7, 0x22, 0xd1, 0xde, 0xc1, 0xb8, 0xca, 0x68, 0xc9, 0x24, - 0x82, 0x93, 0x54, 0xd1, 0x77, 0xc2, 0x18, 0x9f, 0x67, 0xd1, 0x0a, 0xb7, 0x58, 0xf8, 0x44, 0xf6, - 0x7a, 0xd1, 0x9b, 0x60, 0x2b, 0xb1, 0xae, 0x55, 0xc7, 0x06, 0x31, 0xfb, 0x97, 0x8a, 0x30, 0xb4, - 0xbc, 0x47, 0xbc, 0xe8, 0x14, 0x0e, 0xa4, 0x06, 0x4c, 0xb8, 0xde, 0x9e, 0xdf, 0xda, 0x23, 0x4d, - 0x0e, 0x3f, 0xce, 0xe5, 0x7a, 0x5e, 0x90, 0x9e, 0xa8, 0x1a, 0x24, 0x70, 0x82, 0xe4, 0xa3, 0x90, - 0x30, 0xaf, 0xc3, 0x30, 0x9f, 0x7b, 0x21, 0x5e, 0xa6, 0x2a, 0x83, 0xd9, 0x20, 0x8a, 0x5d, 0x10, - 0x4b, 0xbf, 0x5c, 0xfb, 0x2c, 0xaa, 0xa3, 0x77, 0x61, 0x62, 0xd3, 0x0d, 0xc2, 0x88, 0x8a, 0x86, - 0x61, 0xe4, 0xec, 0xb6, 0x1f, 0x42, 0xa2, 0x54, 0xe3, 0xb0, 0x62, 0x50, 0xc2, 0x09, 0xca, 0x68, - 0x0b, 0xc6, 0xa9, 0x90, 0x13, 0x37, 0x35, 0x72, 0xec, 0xa6, 0x94, 0xca, 0xe8, 0x96, 0x4e, 0x08, - 0x9b, 0x74, 0xe9, 0x61, 0xd2, 0x60, 0x42, 0x51, 0x91, 0x71, 0x14, 0xea, 0x30, 0xe1, 0xd2, 0x10, - 0x87, 0xd1, 0x33, 0x89, 0x99, 0xad, 0x94, 0xcc, 0x33, 0x49, 0x33, 0x4e, 0xf9, 0x12, 0x94, 0x08, - 0x1d, 0x42, 0x4a, 0x58, 0x28, 0xc6, 0xe7, 0x07, 0xeb, 0xeb, 0xaa, 0xdb, 0x08, 0x7c, 0x53, 0x96, - 0x5f, 0x96, 0x94, 0x70, 0x4c, 0x14, 0x2d, 0xc1, 0x70, 0x48, 0x02, 0x97, 0x84, 0x42, 0x45, 0xde, - 0x63, 0x1a, 0x19, 0x1a, 0xb7, 0x3d, 0xe7, 0xbf, 0xb1, 0xa8, 0x4a, 0x97, 0x97, 0xc3, 0xa4, 0x21, - 0xa6, 0x15, 0xd7, 0x96, 0xd7, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x84, 0x91, 0x80, 0xb4, 0x98, - 0xb2, 0x68, 0x7c, 0xf0, 0x45, 0xce, 0x75, 0x4f, 0xbc, 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40, 0x01, - 0xa1, 0x3c, 0x84, 0xeb, 0x6d, 0x29, 0x63, 0x0e, 0xa1, 0xeb, 0x7e, 0x5c, 0xb4, 0x7f, 0x06, 0xc7, - 0x18, 0xd2, 0x2a, 0x15, 0xa7, 0x54, 0x43, 0xd7, 0x61, 0x5a, 0x95, 0x56, 0xbd, 0x30, 0x72, 0xbc, - 0x06, 0x61, 0x6a, 0xee, 0x52, 0xcc, 0x15, 0xe1, 0x24, 0x02, 0xee, 0xae, 0x63, 0xff, 0x0c, 0x65, - 0x67, 0xe8, 0x68, 0x9d, 0x02, 0x2f, 0xf0, 0x86, 0xc9, 0x0b, 0x5c, 0xc8, 0x9c, 0xb9, 0x0c, 0x3e, - 0xe0, 0xd0, 0x82, 0x51, 0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xf5, 0x58, 0xb3, 0x1d, 0x98, 0xa2, 0x2b, - 0xfd, 0xf6, 0x46, 0x48, 0x82, 0x3d, 0xd2, 0x64, 0x0b, 0x33, 0xf7, 0x70, 0x0b, 0x53, 0xbd, 0x32, - 0xdf, 0x4a, 0x10, 0xc4, 0x5d, 0x4d, 0xa0, 0x57, 0xa4, 0xe6, 0x24, 0x6f, 0x18, 0x69, 0x71, 0xad, - 0xc8, 0xd1, 0x41, 0x79, 0x4a, 0xfb, 0x10, 0x5d, 0x53, 0x62, 0x7f, 0x49, 0x7e, 0xa3, 0x7a, 0xcd, - 0x6f, 0xa8, 0xc5, 0x92, 0x78, 0xcd, 0x57, 0xcb, 0x01, 0xc7, 0x38, 0x74, 0x8f, 0x52, 0x11, 0x24, - 0xf9, 0x9a, 0x4f, 0x05, 0x14, 0xcc, 0x20, 0xf6, 
0x8b, 0x00, 0xcb, 0x0f, 0x48, 0x83, 0x2f, 0x75, - 0xfd, 0x01, 0xd2, 0xca, 0x7e, 0x80, 0xb4, 0xff, 0x9d, 0x05, 0x13, 0x2b, 0x4b, 0x86, 0x98, 0x38, - 0x07, 0xc0, 0x65, 0xa3, 0x7b, 0xf7, 0xd6, 0xa4, 0x6e, 0x9d, 0xab, 0x47, 0x55, 0x29, 0xd6, 0x30, - 0xd0, 0x05, 0xc8, 0xb7, 0x3a, 0x9e, 0x10, 0x59, 0x46, 0x0e, 0x0f, 0xca, 0xf9, 0x5b, 0x1d, 0x0f, - 0xd3, 0x32, 0xcd, 0x42, 0x30, 0x3f, 0xb0, 0x85, 0x60, 0x5f, 0xf7, 0x2a, 0x54, 0x86, 0xa1, 0xfb, - 0xf7, 0xdd, 0x26, 0x37, 0x62, 0x17, 0x7a, 0xff, 0x7b, 0xf7, 0xaa, 0x95, 0x10, 0xf3, 0x72, 0xfb, - 0xab, 0x79, 0x98, 0x5d, 0x69, 0x91, 0x07, 0x1f, 0xd0, 0x90, 0x7f, 0x50, 0xfb, 0xc6, 0xe3, 0xf1, - 0x8b, 0xc7, 0xb5, 0x61, 0xed, 0x3f, 0x1e, 0x9b, 0x30, 0xc2, 0x1f, 0xb3, 0xa5, 0x59, 0xff, 0x6b, - 0x69, 0xad, 0x67, 0x0f, 0xc8, 0x1c, 0x7f, 0x14, 0x17, 0xe6, 0xfc, 0xea, 0xa6, 0x15, 0xa5, 0x58, - 0x12, 0x9f, 0xfd, 0x0c, 0x8c, 0xe9, 0x98, 0xc7, 0xb2, 0x26, 0xff, 0x0b, 0x79, 0x98, 0xa2, 0x3d, - 0x78, 0xa4, 0x13, 0x71, 0xa7, 0x7b, 0x22, 0x4e, 0xda, 0xa2, 0xb8, 0xff, 0x6c, 0xbc, 0x93, 0x9c, - 0x8d, 0x17, 0xb2, 0x66, 0xe3, 0xb4, 0xe7, 0xe0, 0x7b, 0x2d, 0x38, 0xb3, 0xd2, 0xf2, 0x1b, 0x3b, - 0x09, 0xab, 0xdf, 0x97, 0x61, 0x94, 0x9e, 0xe3, 0xa1, 0xe1, 0x45, 0x64, 0xf8, 0x95, 0x09, 0x10, - 0xd6, 0xf1, 0xb4, 0x6a, 0x77, 0xee, 0x54, 0x2b, 0x69, 0xee, 0x68, 0x02, 0x84, 0x75, 0x3c, 0xfb, - 0xeb, 0x16, 0x5c, 0xbc, 0xbe, 0xb4, 0x1c, 0x2f, 0xc5, 0x2e, 0x8f, 0x38, 0x2a, 0x05, 0x36, 0xb5, - 0xae, 0xc4, 0x52, 0x60, 0x85, 0xf5, 0x42, 0x40, 0x3f, 0x2a, 0xde, 0x9e, 0x3f, 0x6d, 0xc1, 0x99, - 0xeb, 0x6e, 0x44, 0xaf, 0xe5, 0xa4, 0x6f, 0x16, 0xbd, 0x97, 0x43, 0x37, 0xf2, 0x83, 0xfd, 0xa4, - 0x6f, 0x16, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x96, 0xf7, 0x5c, 0x66, 0x46, 0x95, 0x33, 0x55, 0x51, - 0x58, 0x94, 0x63, 0x85, 0x41, 0x3f, 0xac, 0xe9, 0x06, 0x4c, 0x94, 0xd8, 0x17, 0x27, 0xac, 0xfa, - 0xb0, 0x8a, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0x31, 0x0b, 0xce, 0x5d, 0x6f, 0x75, 0xc2, 0x88, 0x04, - 0x9b, 0xa1, 0xd1, 0xd9, 0x17, 0xa1, 0x44, 0xa4, 0xb8, 0x2e, 0xfa, 0xaa, 0x18, 0x4c, 0x25, 0xc7, - 0x73, 0xc7, 0x30, 0x85, 0x37, 0x80, 0xe7, 0xc0, 0xf1, 0x5c, 0xc7, 0x7e, 0x3e, 0x07, 0xe3, 0x37, - 0xd6, 0xd7, 0x6b, 0xd7, 0x49, 0x24, 0x6e, 0xb1, 0xfe, 0xaa, 0x66, 0xac, 0x69, 0xcc, 0x7a, 0x09, - 0x45, 0x9d, 0xc8, 0x6d, 0xcd, 0x71, 0x4f, 0xe4, 0xb9, 0xaa, 0x17, 0xdd, 0x0e, 0xea, 0x51, 0xe0, - 0x7a, 0x5b, 0xa9, 0x3a, 0x36, 0x79, 0xd7, 0xe6, 0xb3, 0xee, 0x5a, 0xf4, 0x22, 0x0c, 0x33, 0x57, - 0x68, 0x29, 0x9e, 0x3c, 0xae, 0x64, 0x0a, 0x56, 0x7a, 0x74, 0x50, 0x2e, 0xdd, 0xc1, 0x55, 0xfe, - 0x07, 0x0b, 0x54, 0x74, 0x07, 0x46, 0xb7, 0xa3, 0xa8, 0x7d, 0x83, 0x38, 0x4d, 0x12, 0xc8, 0xd3, - 0xe1, 0x52, 0xda, 0xe9, 0x40, 0x07, 0x81, 0xa3, 0xc5, 0x1b, 0x2a, 0x2e, 0x0b, 0xb1, 0x4e, 0xc7, - 0xae, 0x03, 0xc4, 0xb0, 0x13, 0xd2, 0x2f, 0xd8, 0x7f, 0x60, 0xc1, 0x08, 0xf7, 0x4a, 0x0b, 0xd0, - 0xeb, 0x50, 0x20, 0x0f, 0x48, 0x43, 0x70, 0x8e, 0xa9, 0x1d, 0x8e, 0x19, 0x0f, 0xae, 0x2d, 0xa7, - 0xff, 0x31, 0xab, 0x85, 0x6e, 0xc0, 0x08, 0xed, 0xed, 0x75, 0xe5, 0xa2, 0xf7, 0x64, 0xd6, 0x17, - 0xab, 0x69, 0xe7, 0xbc, 0x8a, 0x28, 0xc2, 0xb2, 0x3a, 0xd3, 0xfc, 0x36, 0xda, 0x75, 0x7a, 0x80, - 0x45, 0xbd, 0xee, 0xd9, 0xf5, 0xa5, 0x1a, 0x47, 0x12, 0xd4, 0xb8, 0xe6, 0x57, 0x16, 0xe2, 0x98, - 0x88, 0xbd, 0x0e, 0x25, 0x3a, 0xa9, 0x0b, 0x2d, 0xd7, 0xe9, 0xad, 0x74, 0x7e, 0x16, 0x4a, 0x52, - 0x01, 0x1c, 0x0a, 0xc7, 0x26, 0x46, 0x55, 0xea, 0x87, 0x43, 0x1c, 0xc3, 0xed, 0x4d, 0x38, 0xcb, - 0x5e, 0xfe, 0x9d, 0x68, 0xdb, 0xd8, 0x63, 0xfd, 0x17, 0xf3, 0x73, 0x42, 0x10, 0xe3, 0x33, 0x33, - 0xa3, 0xf9, 0x0e, 0x8c, 0x49, 0x8a, 0xb1, 0x50, 0x66, 0xff, 0x61, 0x01, 
0x1e, 0xaf, 0xd6, 0xb3, - 0x1d, 0x16, 0x5f, 0x85, 0x31, 0xce, 0xa6, 0xd1, 0xa5, 0xed, 0xb4, 0x44, 0xbb, 0xea, 0x5d, 0x6c, - 0x5d, 0x83, 0x61, 0x03, 0x13, 0x5d, 0x84, 0xbc, 0xfb, 0x9e, 0x97, 0x34, 0xc3, 0xad, 0xbe, 0xb5, - 0x86, 0x69, 0x39, 0x05, 0x53, 0x8e, 0x8f, 0x1f, 0xa5, 0x0a, 0xac, 0xb8, 0xbe, 0x37, 0x60, 0xc2, - 0x0d, 0x1b, 0xa1, 0x5b, 0xf5, 0xe8, 0x39, 0x13, 0x3b, 0xbb, 0xc6, 0x4a, 0x02, 0xda, 0x69, 0x05, - 0xc5, 0x09, 0x6c, 0xed, 0x5c, 0x1f, 0x1a, 0x98, 0x6b, 0xec, 0xeb, 0xe9, 0x43, 0x19, 0xe2, 0x36, - 0xfb, 0xba, 0x90, 0x19, 0xb5, 0x09, 0x86, 0x98, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x4a, 0x60, 0x8d, - 0x6d, 0xa7, 0xbd, 0xd0, 0x89, 0xb6, 0x2b, 0x6e, 0xd8, 0xf0, 0xf7, 0x48, 0xb0, 0xcf, 0x84, 0xe7, - 0x62, 0x2c, 0x81, 0x29, 0xc0, 0xd2, 0x8d, 0x85, 0x1a, 0xc5, 0xc4, 0xdd, 0x75, 0x4c, 0xae, 0x10, - 0x4e, 0x82, 0x2b, 0x5c, 0x80, 0x49, 0xd9, 0x4c, 0x9d, 0x84, 0xec, 0x8e, 0x18, 0x65, 0x1d, 0x53, - 0xa6, 0xb6, 0xa2, 0x58, 0x75, 0x2b, 0x89, 0x8f, 0x5e, 0x81, 0x71, 0xd7, 0x73, 0x23, 0xd7, 0x89, - 0xfc, 0x80, 0xdd, 0xb0, 0x5c, 0x4e, 0x66, 0x96, 0x6c, 0x55, 0x1d, 0x80, 0x4d, 0x3c, 0xfb, 0x3f, - 0x15, 0x60, 0x9a, 0x4d, 0xdb, 0xb7, 0x56, 0xd8, 0x47, 0x66, 0x85, 0xdd, 0xe9, 0x5e, 0x61, 0x27, - 0xc1, 0xee, 0x7e, 0x98, 0xcb, 0xec, 0x5d, 0x28, 0x29, 0x5b, 0x60, 0xe9, 0x0c, 0x60, 0x65, 0x38, - 0x03, 0xf4, 0xe7, 0x3e, 0xe4, 0x33, 0x6e, 0x3e, 0xf5, 0x19, 0xf7, 0x6f, 0x59, 0x10, 0x9b, 0x44, - 0xa2, 0x1b, 0x50, 0x6a, 0xfb, 0xcc, 0xec, 0x20, 0x90, 0xb6, 0x3c, 0x8f, 0xa7, 0x5e, 0x54, 0xfc, - 0x52, 0xe4, 0xe3, 0x57, 0x93, 0x35, 0x70, 0x5c, 0x19, 0x2d, 0xc2, 0x48, 0x3b, 0x20, 0xf5, 0x88, - 0xb9, 0xc0, 0xf6, 0xa5, 0xc3, 0xd7, 0x08, 0xc7, 0xc7, 0xb2, 0xa2, 0xfd, 0x0b, 0x16, 0x00, 0x7f, - 0x29, 0x75, 0xbc, 0x2d, 0x72, 0x0a, 0xda, 0xdf, 0x0a, 0x14, 0xc2, 0x36, 0x69, 0xf4, 0x32, 0x08, - 0x89, 0xfb, 0x53, 0x6f, 0x93, 0x46, 0x3c, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, 0xed, 0xef, 0x03, 0x98, - 0x88, 0xd1, 0xaa, 0x11, 0xd9, 0x45, 0xcf, 0x1b, 0x2e, 0x71, 0x17, 0x12, 0x2e, 0x71, 0x25, 0x86, - 0xad, 0x29, 0x1a, 0xdf, 0x85, 0xfc, 0xae, 0xf3, 0x40, 0x68, 0x92, 0x9e, 0xed, 0xdd, 0x0d, 0x4a, - 0x7f, 0x6e, 0xd5, 0x79, 0xc0, 0x65, 0xa6, 0x67, 0xe5, 0x02, 0x59, 0x75, 0x1e, 0x1c, 0x71, 0xb3, - 0x0f, 0x76, 0x48, 0xdd, 0x72, 0xc3, 0xe8, 0xcb, 0xff, 0x31, 0xfe, 0xcf, 0x96, 0x1d, 0x6d, 0x84, - 0xb5, 0xe5, 0x7a, 0xe2, 0xdd, 0x70, 0xa0, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x0d, 0xd0, 0x96, - 0xeb, 0xa1, 0xf7, 0x61, 0x44, 0xbc, 0xd1, 0x33, 0x5b, 0x6f, 0x53, 0x4b, 0x95, 0xd5, 0x9e, 0x78, - 0xe2, 0xe7, 0x6d, 0xce, 0x4b, 0x99, 0x50, 0x94, 0xf6, 0x6d, 0x57, 0x36, 0x88, 0xfe, 0xba, 0x05, - 0x13, 0xe2, 0x37, 0x26, 0xef, 0x75, 0x48, 0x18, 0x09, 0xde, 0xf3, 0xd3, 0x83, 0xf7, 0x41, 0x54, - 0xe4, 0x5d, 0xf9, 0xb4, 0x3c, 0x66, 0x4d, 0x60, 0xdf, 0x1e, 0x25, 0x7a, 0x81, 0xfe, 0xa1, 0x05, - 0x67, 0x77, 0x9d, 0x07, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0xed, 0xfa, 0xeb, 0x83, - 0x4d, 0x7f, 0x57, 0x75, 0xde, 0x49, 0x69, 0xe6, 0x7a, 0x36, 0x0d, 0xa5, 0x6f, 0x57, 0x53, 0xfb, - 0x35, 0xbb, 0x09, 0x45, 0xb9, 0xde, 0x52, 0x24, 0xef, 0x8a, 0xce, 0x58, 0x1f, 0xdb, 0x44, 0x42, - 0xf7, 0x4b, 0xa3, 0xed, 0x88, 0xb5, 0xf6, 0x48, 0xdb, 0x79, 0x17, 0xc6, 0xf4, 0x35, 0xf6, 0x48, - 0xdb, 0x7a, 0x0f, 0xce, 0xa4, 0xac, 0xa5, 0x47, 0xda, 0xe4, 0x7d, 0xb8, 0x90, 0xb9, 0x3e, 0x1e, - 0x65, 0xc3, 0xf6, 0xcf, 0x5b, 0xfa, 0x39, 0x78, 0x0a, 0x2a, 0xf8, 0x25, 0x53, 0x05, 0x7f, 0xa9, - 0xf7, 0xce, 0xc9, 0xd0, 0xc3, 0xbf, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, 0x4d, 0x18, 0x6e, 0xd1, - 0x12, 0x69, 0x1c, 0x62, 0xf7, 0xdf, 0x91, 0x31, 0x2f, 0xc5, 0xca, 0x43, 0x2c, 0x28, 0xd8, 0xbf, - 
0x6c, 0x41, 0xe1, 0x14, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x7c, 0x26, 0x69, 0x11, 0xd2, 0x6c, 0x0e, - 0x3b, 0xf7, 0x97, 0x1f, 0x44, 0xc4, 0x0b, 0x99, 0xa8, 0x98, 0x3a, 0x30, 0xdf, 0x05, 0x67, 0x6e, - 0xf9, 0x4e, 0x73, 0xd1, 0x69, 0x39, 0x5e, 0x83, 0x04, 0x55, 0x6f, 0xab, 0xaf, 0x95, 0x92, 0x6e, - 0x53, 0x94, 0xeb, 0x67, 0x53, 0x64, 0x6f, 0x03, 0xd2, 0x1b, 0x10, 0x76, 0x9c, 0x18, 0x46, 0x5c, - 0xde, 0x94, 0x18, 0xfe, 0xa7, 0xd3, 0xb9, 0xbb, 0xae, 0x9e, 0x69, 0x16, 0x8a, 0xbc, 0x00, 0x4b, - 0x42, 0xf6, 0xab, 0x90, 0xea, 0xbb, 0xd5, 0x5f, 0x6d, 0x60, 0x7f, 0x1e, 0xa6, 0x59, 0xcd, 0x63, - 0x8a, 0xb4, 0x76, 0x42, 0x49, 0x97, 0x12, 0x32, 0xca, 0xfe, 0x8a, 0x05, 0x93, 0x6b, 0x89, 0xf8, - 0x15, 0x57, 0xd8, 0x7b, 0x60, 0x8a, 0x6e, 0xb8, 0xce, 0x4a, 0xb1, 0x80, 0x9e, 0xb8, 0x0e, 0xea, - 0x4f, 0x2d, 0x88, 0xdd, 0x29, 0x4f, 0x81, 0xf1, 0x5a, 0x32, 0x18, 0xaf, 0x54, 0xdd, 0x88, 0xea, - 0x4e, 0x16, 0xdf, 0x85, 0x6e, 0xaa, 0xd8, 0x01, 0x3d, 0xd4, 0x22, 0x31, 0x19, 0xee, 0x69, 0x3e, - 0x61, 0x06, 0x18, 0x90, 0xd1, 0x04, 0x98, 0x29, 0x91, 0xc2, 0xfd, 0x88, 0x98, 0x12, 0xa9, 0xfe, - 0x64, 0xec, 0xd0, 0x9a, 0xd6, 0x65, 0x76, 0x72, 0x7d, 0x3b, 0x33, 0x0d, 0x77, 0x5a, 0xee, 0xfb, - 0x44, 0x05, 0x40, 0x29, 0x0b, 0x53, 0x6f, 0x51, 0x7a, 0x74, 0x50, 0x1e, 0x57, 0xff, 0x78, 0x94, - 0xac, 0xb8, 0x8a, 0x7d, 0x03, 0x26, 0x13, 0x03, 0x86, 0x5e, 0x86, 0xa1, 0xf6, 0xb6, 0x13, 0x92, - 0x84, 0xf9, 0xe4, 0x50, 0x8d, 0x16, 0x1e, 0x1d, 0x94, 0x27, 0x54, 0x05, 0x56, 0x82, 0x39, 0xb6, - 0xfd, 0x3f, 0x2d, 0x28, 0xac, 0xf9, 0xcd, 0xd3, 0x58, 0x4c, 0x6f, 0x18, 0x8b, 0xe9, 0x89, 0xac, - 0x18, 0x83, 0x99, 0xeb, 0x68, 0x25, 0xb1, 0x8e, 0x2e, 0x65, 0x52, 0xe8, 0xbd, 0x84, 0x76, 0x61, - 0x94, 0x45, 0x2e, 0x14, 0xe6, 0x9c, 0x2f, 0x1a, 0x32, 0x40, 0x39, 0x21, 0x03, 0x4c, 0x6a, 0xa8, - 0x9a, 0x24, 0xf0, 0x0c, 0x8c, 0x08, 0x93, 0xc2, 0xa4, 0x11, 0xbc, 0xc0, 0xc5, 0x12, 0x6e, 0xff, - 0x78, 0x1e, 0x8c, 0x48, 0x89, 0xe8, 0x57, 0x2d, 0x98, 0x0b, 0xb8, 0x57, 0x61, 0xb3, 0xd2, 0x09, - 0x5c, 0x6f, 0xab, 0xde, 0xd8, 0x26, 0xcd, 0x4e, 0xcb, 0xf5, 0xb6, 0xaa, 0x5b, 0x9e, 0xaf, 0x8a, - 0x97, 0x1f, 0x90, 0x46, 0x87, 0xbd, 0x0b, 0xf4, 0x09, 0xcb, 0xa8, 0x4c, 0x76, 0xae, 0x1d, 0x1e, - 0x94, 0xe7, 0xf0, 0xb1, 0x68, 0xe3, 0x63, 0xf6, 0x05, 0x7d, 0xdd, 0x82, 0x79, 0x1e, 0x40, 0x70, - 0xf0, 0xfe, 0xf7, 0x90, 0x98, 0x6a, 0x92, 0x54, 0x4c, 0x64, 0x9d, 0x04, 0xbb, 0x8b, 0xaf, 0x88, - 0x01, 0x9d, 0xaf, 0x1d, 0xaf, 0x2d, 0x7c, 0xdc, 0xce, 0xd9, 0xff, 0x22, 0x0f, 0xe3, 0xc2, 0xa1, - 0x5d, 0x44, 0x4a, 0x79, 0xd9, 0x58, 0x12, 0x4f, 0x26, 0x96, 0xc4, 0xb4, 0x81, 0x7c, 0x32, 0x41, - 0x52, 0x42, 0x98, 0x6e, 0x39, 0x61, 0x74, 0x83, 0x38, 0x41, 0xb4, 0x41, 0x1c, 0x6e, 0xca, 0x92, - 0x3f, 0xb6, 0xd9, 0x8d, 0x52, 0xd1, 0xdc, 0x4a, 0x12, 0xc3, 0xdd, 0xf4, 0xd1, 0x1e, 0x20, 0x66, - 0x8f, 0x13, 0x38, 0x5e, 0xc8, 0xbf, 0xc5, 0x15, 0x6f, 0x06, 0xc7, 0x6b, 0x75, 0x56, 0xb4, 0x8a, - 0x6e, 0x75, 0x51, 0xc3, 0x29, 0x2d, 0x68, 0x76, 0x56, 0x43, 0x83, 0xda, 0x59, 0x0d, 0xf7, 0xf1, - 0x34, 0xf1, 0x60, 0xaa, 0x2b, 0x26, 0xc1, 0xdb, 0x50, 0x52, 0xf6, 0x70, 0xe2, 0xd0, 0xe9, 0x1d, - 0xda, 0x23, 0x49, 0x81, 0xab, 0x51, 0x62, 0x5b, 0xcc, 0x98, 0x9c, 0xfd, 0x8f, 0x72, 0x46, 0x83, - 0x7c, 0x12, 0xd7, 0xa0, 0xe8, 0x84, 0xa1, 0xbb, 0xe5, 0x91, 0xa6, 0xd8, 0xb1, 0x1f, 0xcf, 0xda, - 0xb1, 0x46, 0x33, 0xcc, 0x26, 0x71, 0x41, 0xd4, 0xc4, 0x8a, 0x06, 0xba, 0xc1, 0x0d, 0x86, 0xf6, - 0x24, 0xcf, 0x3f, 0x18, 0x35, 0x90, 0x26, 0x45, 0x7b, 0x04, 0x8b, 0xfa, 0xe8, 0x0b, 0xdc, 0xa2, - 0xeb, 0xa6, 0xe7, 0xdf, 0xf7, 0xae, 0xfb, 0xbe, 0xf4, 0x42, 0x1b, 0x8c, 0xe0, 0xb4, 0xb4, 0xe3, - 0x52, 0xd5, 0xb1, 0x49, 
0x6d, 0xb0, 0xb8, 0x3d, 0xdf, 0x0d, 0x67, 0x28, 0x69, 0xd3, 0x97, 0x24, - 0x44, 0x04, 0x26, 0x45, 0xb4, 0x04, 0x59, 0x26, 0xc6, 0x2e, 0x95, 0x9d, 0x37, 0x6b, 0xc7, 0x4a, - 0xbf, 0x9b, 0x26, 0x09, 0x9c, 0xa4, 0x69, 0xff, 0x94, 0x05, 0xcc, 0x0a, 0xfe, 0x14, 0x58, 0x86, - 0xcf, 0x9a, 0x2c, 0xc3, 0x4c, 0xd6, 0x20, 0x67, 0x70, 0x0b, 0x2f, 0xf1, 0x95, 0x55, 0x0b, 0xfc, - 0x07, 0xfb, 0xe2, 0x35, 0xbd, 0x3f, 0x27, 0x6b, 0xff, 0x5f, 0x8b, 0x1f, 0x62, 0xca, 0x31, 0x1d, - 0x7d, 0x0f, 0x14, 0x1b, 0x4e, 0xdb, 0x69, 0xf0, 0xb0, 0xbe, 0x99, 0x5a, 0x1d, 0xa3, 0xd2, 0xdc, - 0x92, 0xa8, 0xc1, 0xb5, 0x14, 0x32, 0xea, 0x46, 0x51, 0x16, 0xf7, 0xd5, 0x4c, 0xa8, 0x26, 0x67, - 0x77, 0x60, 0xdc, 0x20, 0xf6, 0x48, 0x45, 0xda, 0xef, 0xe1, 0x57, 0xac, 0x8a, 0x12, 0xb3, 0x0b, - 0xd3, 0x9e, 0xf6, 0x9f, 0x5e, 0x28, 0x52, 0x4c, 0xf9, 0x78, 0xbf, 0x4b, 0x94, 0xdd, 0x3e, 0x9a, - 0x95, 0x7f, 0x82, 0x0c, 0xee, 0xa6, 0x6c, 0xff, 0x84, 0x05, 0x8f, 0xe9, 0x88, 0x5a, 0xcc, 0x80, - 0x7e, 0x7a, 0xe2, 0x0a, 0x14, 0xfd, 0x36, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xad, 0x71, 0x55, 0x0e, - 0xfa, 0x6d, 0x51, 0x7e, 0x24, 0xe2, 0x2b, 0x4a, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0x54, 0x8e, 0x61, - 0x83, 0x11, 0x8a, 0x78, 0x0e, 0xec, 0x0c, 0x60, 0x4f, 0xa6, 0x21, 0x16, 0x10, 0xfb, 0x0f, 0x2d, - 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0xf7, 0x60, 0x6a, 0xd7, 0x89, 0x1a, 0xdb, 0xcb, 0x0f, 0xda, 0x01, - 0x57, 0x8f, 0xcb, 0x71, 0x7a, 0xb6, 0xdf, 0x38, 0x69, 0x1f, 0x19, 0x1b, 0xa9, 0xad, 0x26, 0x88, - 0xe1, 0x2e, 0xf2, 0x68, 0x03, 0x46, 0x59, 0x19, 0xb3, 0x86, 0x0e, 0x7b, 0xb1, 0x06, 0x59, 0xad, - 0xa9, 0x57, 0xe7, 0xd5, 0x98, 0x0e, 0xd6, 0x89, 0xda, 0x5f, 0xce, 0xf3, 0xdd, 0xce, 0xb8, 0xed, - 0x67, 0x60, 0xa4, 0xed, 0x37, 0x97, 0xaa, 0x15, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, - 0x25, 0x1c, 0xbd, 0x06, 0x40, 0x1e, 0x44, 0x24, 0xf0, 0x9c, 0x96, 0x32, 0x1a, 0x51, 0x66, 0x92, - 0x15, 0x7f, 0xcd, 0x8f, 0xee, 0x84, 0xe4, 0xbb, 0x96, 0x15, 0x0a, 0xd6, 0xd0, 0xd1, 0x35, 0x80, - 0x76, 0xe0, 0xef, 0xb9, 0x4d, 0xe6, 0x5e, 0x97, 0x37, 0x4d, 0x2a, 0x6a, 0x0a, 0x82, 0x35, 0x2c, - 0xf4, 0x1a, 0x8c, 0x77, 0xbc, 0x90, 0x73, 0x28, 0xce, 0x86, 0x88, 0x4e, 0x58, 0x8c, 0xad, 0x1b, - 0xee, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, 0x05, 0x18, 0x8e, 0x1c, 0x66, 0x13, 0x31, 0x94, 0x6d, 0xdb, - 0xb8, 0x4e, 0x31, 0xf4, 0xa0, 0xb2, 0xb4, 0x02, 0x16, 0x15, 0xd1, 0xdb, 0xd2, 0x57, 0x81, 0x9f, - 0xf5, 0xc2, 0xa8, 0x78, 0xb0, 0x7b, 0x41, 0xf3, 0x54, 0x10, 0xc6, 0xca, 0x06, 0x2d, 0xfb, 0xeb, - 0x25, 0x80, 0x98, 0x1d, 0x47, 0xef, 0x77, 0x9d, 0x47, 0xcf, 0xf5, 0x66, 0xe0, 0x4f, 0xee, 0x30, - 0x42, 0xdf, 0x6f, 0xc1, 0xa8, 0xd3, 0x6a, 0xf9, 0x0d, 0x27, 0x62, 0xa3, 0x9c, 0xeb, 0x7d, 0x1e, - 0x8a, 0xf6, 0x17, 0xe2, 0x1a, 0xbc, 0x0b, 0x2f, 0xca, 0x85, 0xa7, 0x41, 0xfa, 0xf6, 0x42, 0x6f, - 0x18, 0x7d, 0x4a, 0x4a, 0x69, 0x7c, 0x79, 0xcc, 0x26, 0xa5, 0xb4, 0x12, 0x3b, 0xfa, 0x35, 0x01, - 0x0d, 0xdd, 0x31, 0x02, 0xcf, 0x15, 0xb2, 0x63, 0x30, 0x18, 0x5c, 0x69, 0xbf, 0x98, 0x73, 0xa8, - 0xa6, 0x3b, 0x57, 0x0d, 0x65, 0x07, 0x2a, 0xd1, 0xc4, 0x9f, 0x3e, 0x8e, 0x55, 0xef, 0xc2, 0x64, - 0xd3, 0xbc, 0xdb, 0xc5, 0x6a, 0x7a, 0x3a, 0x8b, 0x6e, 0x82, 0x15, 0x88, 0x6f, 0xf3, 0x04, 0x00, - 0x27, 0x09, 0xa3, 0x1a, 0x77, 0x73, 0xab, 0x7a, 0x9b, 0xbe, 0x30, 0x4e, 0xb7, 0x33, 0xe7, 0x72, - 0x3f, 0x8c, 0xc8, 0x2e, 0xc5, 0x8c, 0x2f, 0xed, 0x35, 0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x4d, 0x18, - 0x66, 0x7e, 0xb2, 0xe1, 0x4c, 0x31, 0x5b, 0x99, 0x68, 0x86, 0x78, 0x88, 0x37, 0x15, 0xfb, 0x1b, - 0x62, 0x41, 0x01, 0xdd, 0x90, 0x71, 0x60, 0xc2, 0xaa, 0x77, 0x27, 0x24, 0x2c, 0x0e, 0x4c, 0x69, - 0xf1, 0xe3, 0x71, 0x88, 0x17, 0x5e, 0x9e, 0x1a, 
0x3e, 0xde, 0xa8, 0x49, 0x99, 0x23, 0xf1, 0x5f, - 0x46, 0xa5, 0x9f, 0x81, 0xec, 0xee, 0x99, 0x91, 0xeb, 0xe3, 0xe1, 0xbc, 0x6b, 0x92, 0xc0, 0x49, - 0x9a, 0x94, 0xd1, 0xe4, 0x3b, 0x57, 0x98, 0xb7, 0xf7, 0xdb, 0xff, 0x5c, 0xbe, 0x66, 0x97, 0x0c, - 0x2f, 0xc1, 0xa2, 0xfe, 0xa9, 0xde, 0xfa, 0xb3, 0x1e, 0x4c, 0x25, 0xb7, 0xe8, 0x23, 0xe5, 0x32, - 0xfe, 0xa0, 0x00, 0x13, 0xe6, 0x92, 0x42, 0xf3, 0x50, 0x12, 0x44, 0x54, 0x50, 0x52, 0xb5, 0x4b, - 0x56, 0x25, 0x00, 0xc7, 0x38, 0x2c, 0x16, 0x2d, 0xab, 0xae, 0x99, 0x25, 0xc6, 0xb1, 0x68, 0x15, - 0x04, 0x6b, 0x58, 0x54, 0x5e, 0xda, 0xf0, 0xfd, 0x48, 0x5d, 0x2a, 0x6a, 0xdd, 0x2d, 0xb2, 0x52, - 0x2c, 0xa0, 0xf4, 0x32, 0xd9, 0x21, 0x81, 0x47, 0x5a, 0x66, 0xac, 0x33, 0x75, 0x99, 0xdc, 0xd4, - 0x81, 0xd8, 0xc4, 0xa5, 0xb7, 0xa4, 0x1f, 0xb2, 0x85, 0x2c, 0xa4, 0xb2, 0xd8, 0xcc, 0xb3, 0xce, - 0x3d, 0xce, 0x25, 0x1c, 0x7d, 0x1e, 0x1e, 0x53, 0x0e, 0xe2, 0x98, 0x2b, 0xaa, 0x65, 0x8b, 0xc3, - 0x86, 0x12, 0xe5, 0xb1, 0xa5, 0x74, 0x34, 0x9c, 0x55, 0x1f, 0xbd, 0x01, 0x13, 0x82, 0x73, 0x97, - 0x14, 0x47, 0x4c, 0xdb, 0x89, 0x9b, 0x06, 0x14, 0x27, 0xb0, 0x65, 0xb4, 0x36, 0xc6, 0x3c, 0x4b, - 0x0a, 0xc5, 0xee, 0x68, 0x6d, 0x3a, 0x1c, 0x77, 0xd5, 0x40, 0x0b, 0x30, 0xc9, 0x59, 0x2b, 0xd7, - 0xdb, 0xe2, 0x73, 0x22, 0xbc, 0x4f, 0xd4, 0x96, 0xba, 0x6d, 0x82, 0x71, 0x12, 0x1f, 0xbd, 0x0a, - 0x63, 0x4e, 0xd0, 0xd8, 0x76, 0x23, 0xd2, 0x88, 0x3a, 0x01, 0x77, 0x4b, 0xd1, 0x8c, 0x4f, 0x16, - 0x34, 0x18, 0x36, 0x30, 0xed, 0xf7, 0xe1, 0x4c, 0x8a, 0xe3, 0x1a, 0x5d, 0x38, 0x4e, 0xdb, 0x95, - 0xdf, 0x94, 0x30, 0xd8, 0x5c, 0xa8, 0x55, 0xe5, 0xd7, 0x68, 0x58, 0x74, 0x75, 0x32, 0x07, 0x37, - 0x2d, 0x09, 0x85, 0x5a, 0x9d, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x7f, 0xe5, 0x60, 0x32, 0x45, - 0xf9, 0xce, 0x12, 0x21, 0x24, 0x64, 0x8f, 0x38, 0xef, 0x81, 0x19, 0xfc, 0x2f, 0x77, 0x8c, 0xe0, - 0x7f, 0xf9, 0x7e, 0xc1, 0xff, 0x0a, 0x1f, 0x24, 0xf8, 0x9f, 0x39, 0x62, 0x43, 0x03, 0x8d, 0x58, - 0x4a, 0xc0, 0xc0, 0xe1, 0x63, 0x06, 0x0c, 0x34, 0x06, 0x7d, 0x64, 0x80, 0x41, 0xff, 0xe1, 0x1c, - 0x4c, 0x25, 0x8d, 0xe4, 0x4e, 0x41, 0x1d, 0xfb, 0xa6, 0xa1, 0x8e, 0x4d, 0x4f, 0x2b, 0x92, 0x34, - 0xdd, 0xcb, 0x52, 0xcd, 0xe2, 0x84, 0x6a, 0xf6, 0x93, 0x03, 0x51, 0xeb, 0xad, 0xa6, 0xfd, 0x3b, - 0x39, 0x38, 0x97, 0xac, 0xb2, 0xd4, 0x72, 0xdc, 0xdd, 0x53, 0x18, 0x9b, 0xdb, 0xc6, 0xd8, 0x3c, - 0x3f, 0xc8, 0xd7, 0xb0, 0xae, 0x65, 0x0e, 0xd0, 0xbd, 0xc4, 0x00, 0xcd, 0x0f, 0x4e, 0xb2, 0xf7, - 0x28, 0x7d, 0x23, 0x0f, 0x97, 0x52, 0xeb, 0xc5, 0xda, 0xcc, 0x15, 0x43, 0x9b, 0x79, 0x2d, 0xa1, - 0xcd, 0xb4, 0x7b, 0xd7, 0x3e, 0x19, 0xf5, 0xa6, 0xf0, 0x28, 0x64, 0x01, 0xe2, 0x1e, 0x52, 0xb5, - 0x69, 0x78, 0x14, 0x2a, 0x42, 0xd8, 0xa4, 0xfb, 0xcd, 0xa4, 0xd2, 0xfc, 0x57, 0x16, 0x5c, 0x48, - 0x9d, 0x9b, 0x53, 0x50, 0x61, 0xad, 0x99, 0x2a, 0xac, 0x67, 0x06, 0x5e, 0xad, 0x19, 0x3a, 0xad, - 0xdf, 0x2c, 0x64, 0x7c, 0x0b, 0x13, 0xd0, 0x6f, 0xc3, 0xa8, 0xd3, 0x68, 0x90, 0x30, 0x5c, 0xf5, - 0x9b, 0x2a, 0x60, 0xda, 0xf3, 0x4c, 0xce, 0x8a, 0x8b, 0x8f, 0x0e, 0xca, 0xb3, 0x49, 0x12, 0x31, - 0x18, 0xeb, 0x14, 0xcc, 0x18, 0x8f, 0xb9, 0x13, 0x8d, 0xf1, 0x78, 0x0d, 0x60, 0x4f, 0x71, 0xeb, - 0x49, 0x21, 0x5f, 0xe3, 0xe3, 0x35, 0x2c, 0xf4, 0x05, 0x28, 0x86, 0xe2, 0x1a, 0x17, 0x4b, 0xf1, - 0xc5, 0x01, 0xe7, 0xca, 0xd9, 0x20, 0x2d, 0xd3, 0x75, 0x5d, 0xe9, 0x43, 0x14, 0x49, 0xf4, 0x1d, - 0x30, 0x15, 0xf2, 0xc8, 0x28, 0x4b, 0x2d, 0x27, 0x64, 0x7e, 0x10, 0x62, 0x15, 0x32, 0x7f, 0xf4, - 0x7a, 0x02, 0x86, 0xbb, 0xb0, 0xd1, 0x8a, 0xfc, 0x28, 0x16, 0xc6, 0x85, 0x2f, 0xcc, 0x2b, 0xf1, - 0x07, 0x89, 0x34, 0x4c, 0x67, 0x93, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 
0xbe, 0x00, 0x40, 0x97, - 0x8f, 0xd0, 0x25, 0x8c, 0x64, 0x1f, 0x9e, 0xf4, 0x54, 0x69, 0xa6, 0x5a, 0x7e, 0x32, 0x5f, 0xbe, - 0x8a, 0x22, 0x82, 0x35, 0x82, 0xf6, 0x0f, 0x17, 0xe0, 0xf1, 0x1e, 0x67, 0x24, 0x5a, 0x30, 0x9f, - 0x40, 0x9f, 0x4d, 0x0a, 0xd7, 0xb3, 0xa9, 0x95, 0x0d, 0x69, 0x3b, 0xb1, 0x14, 0x73, 0x1f, 0x78, - 0x29, 0xfe, 0x80, 0xa5, 0xa9, 0x3d, 0xb8, 0x31, 0xdf, 0x67, 0x8f, 0x79, 0xf6, 0x9f, 0xa0, 0x1e, - 0x64, 0x33, 0x45, 0x99, 0x70, 0x6d, 0xe0, 0xee, 0x0c, 0xac, 0x5d, 0x38, 0x5d, 0xe5, 0xef, 0x97, - 0x2d, 0x78, 0x32, 0xb5, 0xbf, 0x86, 0xc9, 0xc6, 0x3c, 0x94, 0x1a, 0xb4, 0x50, 0x73, 0xdd, 0x8a, - 0x7d, 0x5a, 0x25, 0x00, 0xc7, 0x38, 0x86, 0x65, 0x46, 0xae, 0xaf, 0x65, 0xc6, 0x3f, 0xb7, 0xa0, - 0x6b, 0x7f, 0x9c, 0xc2, 0x41, 0x5d, 0x35, 0x0f, 0xea, 0x8f, 0x0f, 0x32, 0x97, 0x19, 0x67, 0xf4, - 0x7f, 0x9e, 0x84, 0xf3, 0x19, 0xbe, 0x1a, 0x7b, 0x30, 0xbd, 0xd5, 0x20, 0xa6, 0x53, 0x9c, 0xf8, - 0x98, 0x54, 0xff, 0xc1, 0x9e, 0x1e, 0x74, 0x2c, 0x3d, 0xcf, 0x74, 0x17, 0x0a, 0xee, 0x6e, 0x02, - 0x7d, 0xd9, 0x82, 0xb3, 0xce, 0xfd, 0xb0, 0x2b, 0x09, 0xa3, 0x58, 0x33, 0x2f, 0xa5, 0x2a, 0x41, - 0xfa, 0x24, 0x6d, 0xe4, 0xf9, 0x8a, 0xd2, 0xb0, 0x70, 0x6a, 0x5b, 0x08, 0x8b, 0x10, 0x9a, 0x94, - 0x9d, 0xef, 0xe1, 0xb6, 0x99, 0xe6, 0x54, 0xc3, 0x8f, 0x6c, 0x09, 0xc1, 0x8a, 0x0e, 0xba, 0x0b, - 0xa5, 0x2d, 0xe9, 0xe9, 0x26, 0xae, 0x84, 0xd4, 0x3b, 0x36, 0xd5, 0x1d, 0x8e, 0x3f, 0x4b, 0x2a, - 0x10, 0x8e, 0x49, 0xa1, 0x37, 0x20, 0xef, 0x6d, 0x86, 0xbd, 0x12, 0xfd, 0x24, 0x2c, 0x99, 0xb8, - 0x4b, 0xf4, 0xda, 0x4a, 0x1d, 0xd3, 0x8a, 0xe8, 0x06, 0xe4, 0x83, 0x8d, 0xa6, 0xd0, 0xdb, 0xa5, - 0x9e, 0xdc, 0x78, 0xb1, 0x92, 0xbe, 0x48, 0x38, 0x25, 0xbc, 0x58, 0xc1, 0x94, 0x04, 0xaa, 0xc1, - 0x10, 0x73, 0x6b, 0x10, 0xb7, 0x40, 0x2a, 0xbf, 0xdb, 0xc3, 0x3d, 0x88, 0xfb, 0x4d, 0x33, 0x04, - 0xcc, 0x09, 0xa1, 0x75, 0x18, 0x6e, 0xb0, 0xa4, 0x30, 0x22, 0x6a, 0xf3, 0xa7, 0x52, 0x35, 0x74, - 0x3d, 0xb2, 0xe5, 0x08, 0x85, 0x15, 0xc3, 0xc0, 0x82, 0x16, 0xa3, 0x4a, 0xda, 0xdb, 0x9b, 0x21, - 0x93, 0xf0, 0xb3, 0xa8, 0xf6, 0x48, 0x02, 0x25, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xcf, 0x40, - 0x6e, 0xb3, 0x21, 0xbc, 0x1e, 0x52, 0x55, 0x75, 0xa6, 0x57, 0xfb, 0xe2, 0xf0, 0xe1, 0x41, 0x39, - 0xb7, 0xb2, 0x84, 0x73, 0x9b, 0x0d, 0xb4, 0x06, 0x23, 0x9b, 0xdc, 0x0f, 0x56, 0x68, 0xe3, 0x9e, - 0x4e, 0x77, 0xd1, 0xed, 0x72, 0x95, 0xe5, 0xd6, 0xfa, 0x02, 0x80, 0x25, 0x11, 0x16, 0x87, 0x52, - 0xf9, 0xf3, 0x8a, 0x80, 0xcc, 0x73, 0xc7, 0xf3, 0xc1, 0xe6, 0xb7, 0x72, 0xec, 0x15, 0x8c, 0x35, - 0x8a, 0xe8, 0x4b, 0x50, 0x72, 0x64, 0xfa, 0x3f, 0x11, 0xb0, 0xe2, 0xc5, 0xd4, 0x8d, 0xd9, 0x3b, - 0x33, 0x22, 0x5f, 0xd5, 0x0a, 0x09, 0xc7, 0x44, 0xd1, 0x0e, 0x8c, 0xef, 0x85, 0xed, 0x6d, 0x22, - 0x37, 0x32, 0x8b, 0x5f, 0x91, 0x71, 0x71, 0xdd, 0x15, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x56, 0xd7, - 0xd9, 0xc3, 0xde, 0xb2, 0xef, 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x0e, 0xff, 0x7b, 0x1d, 0x7f, 0x63, - 0x3f, 0x22, 0x22, 0x82, 0x73, 0xea, 0xf0, 0xbf, 0xc5, 0x51, 0xba, 0x87, 0x5f, 0x00, 0xb0, 0x24, - 0x42, 0xb7, 0xba, 0x23, 0x53, 0x6b, 0xb2, 0xc8, 0xcd, 0x19, 0x5b, 0x3d, 0x35, 0xff, 0xa6, 0x36, - 0x28, 0xec, 0x8c, 0x8c, 0x49, 0xb1, 0xb3, 0xb1, 0xbd, 0xed, 0x47, 0xbe, 0x97, 0x38, 0x97, 0xa7, - 0xb3, 0xcf, 0xc6, 0x5a, 0x0a, 0x7e, 0xf7, 0xd9, 0x98, 0x86, 0x85, 0x53, 0xdb, 0x42, 0x4d, 0x98, - 0x68, 0xfb, 0x41, 0x74, 0xdf, 0x0f, 0xe4, 0xfa, 0x42, 0x3d, 0xb4, 0x09, 0x06, 0xa6, 0x68, 0x91, - 0x45, 0x14, 0x37, 0x21, 0x38, 0x41, 0x13, 0x7d, 0x0e, 0x46, 0xc2, 0x86, 0xd3, 0x22, 0xd5, 0xdb, - 0x33, 0x67, 0xb2, 0x2f, 0x9d, 0x3a, 0x47, 0xc9, 0x58, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, - 
0xd0, 0x0a, 0x0c, 0xb1, 0xb4, 0x00, 0x2c, 0xf8, 0x74, 0x46, 0x60, 0xa4, 0x2e, 0xbb, 0x52, 0x7e, - 0x36, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0x98, 0x6a, 0x3f, 0x9c, 0x39, 0x97, 0xbd, 0x07, - 0x04, 0x2f, 0x7e, 0xbb, 0xde, 0x6b, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, 0x33, 0x3d, 0x4d, - 0xcf, 0xf7, 0x30, 0x63, 0xc9, 0x3c, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, 0xc2, 0xfe, 0xbd, - 0x91, 0x6e, 0x4e, 0x85, 0x89, 0x61, 0x7f, 0xd1, 0xea, 0x7a, 0xa1, 0xfb, 0xf4, 0xa0, 0x5a, 0xa1, - 0x13, 0xe4, 0x51, 0xbf, 0x6c, 0xc1, 0xf9, 0x76, 0xea, 0x87, 0x88, 0x6b, 0x7f, 0x30, 0xe5, 0x12, - 0xff, 0x74, 0x15, 0x20, 0x3e, 0x1d, 0x8e, 0x33, 0x5a, 0x4a, 0xca, 0x01, 0xf9, 0x0f, 0x2c, 0x07, - 0xac, 0x42, 0x91, 0xb1, 0x96, 0x7d, 0x92, 0xa4, 0x25, 0xc5, 0x21, 0xc6, 0x40, 0x2c, 0x89, 0x8a, - 0x58, 0x91, 0x40, 0x3f, 0x68, 0xc1, 0xc5, 0x64, 0xd7, 0x31, 0x61, 0x60, 0x11, 0x4e, 0x9d, 0x4b, - 0x80, 0x2b, 0xe2, 0xfb, 0x2f, 0xd6, 0x7a, 0x21, 0x1f, 0xf5, 0x43, 0xc0, 0xbd, 0x1b, 0x43, 0x95, - 0x14, 0x11, 0x74, 0xd8, 0x54, 0xbb, 0x0f, 0x20, 0x86, 0xbe, 0x04, 0x63, 0xbb, 0x7e, 0xc7, 0x8b, - 0x84, 0xd5, 0x8b, 0xf0, 0x53, 0x64, 0xcf, 0xcc, 0xab, 0x5a, 0x39, 0x36, 0xb0, 0x12, 0xc2, 0x6b, - 0xf1, 0xa1, 0x85, 0xd7, 0x77, 0x12, 0xa9, 0xb0, 0x4b, 0xd9, 0x61, 0xfb, 0x84, 0x9c, 0x7f, 0x8c, - 0x84, 0xd8, 0xa7, 0x2b, 0x11, 0xfd, 0x8c, 0x95, 0xc2, 0xca, 0x73, 0x19, 0xf9, 0x75, 0x53, 0x46, - 0xbe, 0x92, 0x94, 0x91, 0xbb, 0x54, 0xae, 0x86, 0x78, 0x3c, 0x78, 0xec, 0xe7, 0x41, 0x83, 0xa9, - 0xd9, 0x2d, 0xb8, 0xdc, 0xef, 0x5a, 0x62, 0xe6, 0x4f, 0x4d, 0xf5, 0xc0, 0x16, 0x9b, 0x3f, 0x35, - 0xab, 0x15, 0xcc, 0x20, 0x83, 0x46, 0xdb, 0xb0, 0xff, 0x9b, 0x05, 0xf9, 0x9a, 0xdf, 0x3c, 0x05, - 0x15, 0xf2, 0x67, 0x0d, 0x15, 0xf2, 0xe3, 0x19, 0x29, 0xca, 0x33, 0x15, 0xc6, 0xcb, 0x09, 0x85, - 0xf1, 0xc5, 0x2c, 0x02, 0xbd, 0xd5, 0xc3, 0x3f, 0x99, 0x07, 0x3d, 0xa1, 0x3a, 0xfa, 0xcd, 0x87, - 0xb1, 0x3d, 0xce, 0xf7, 0xca, 0xb1, 0x2e, 0x28, 0x33, 0xab, 0x29, 0xe9, 0x7a, 0xf7, 0x67, 0xcc, - 0x04, 0xf9, 0x1e, 0x71, 0xb7, 0xb6, 0x23, 0xd2, 0x4c, 0x7e, 0xce, 0xe9, 0x99, 0x20, 0xff, 0x17, - 0x0b, 0x26, 0x13, 0xad, 0xa3, 0x16, 0x8c, 0xb7, 0x74, 0xfd, 0x9f, 0x58, 0xa7, 0x0f, 0xa5, 0x3a, - 0x14, 0x26, 0x9c, 0x5a, 0x11, 0x36, 0x89, 0xa3, 0x39, 0x00, 0xf5, 0x3e, 0x27, 0xf5, 0x5e, 0x8c, - 0xeb, 0x57, 0x0f, 0x78, 0x21, 0xd6, 0x30, 0xd0, 0xcb, 0x30, 0x1a, 0xf9, 0x6d, 0xbf, 0xe5, 0x6f, - 0xed, 0xdf, 0x24, 0x32, 0xbe, 0x8b, 0x32, 0xcc, 0x5a, 0x8f, 0x41, 0x58, 0xc7, 0xb3, 0x7f, 0x3a, - 0x0f, 0xc9, 0x24, 0xfc, 0xdf, 0x5a, 0x93, 0x1f, 0xcd, 0x35, 0xf9, 0x0d, 0x0b, 0xa6, 0x68, 0xeb, - 0xcc, 0x48, 0x44, 0x5e, 0xb6, 0x2a, 0x07, 0x8d, 0xd5, 0x23, 0x07, 0xcd, 0x15, 0x7a, 0x76, 0x35, - 0xfd, 0x4e, 0x24, 0xf4, 0x66, 0xda, 0xe1, 0x44, 0x4b, 0xb1, 0x80, 0x0a, 0x3c, 0x12, 0x04, 0xc2, - 0xf3, 0x49, 0xc7, 0x23, 0x41, 0x80, 0x05, 0x54, 0xa6, 0xa8, 0x29, 0x64, 0xa4, 0xa8, 0x61, 0xd1, - 0xea, 0x84, 0x39, 0x81, 0x60, 0x7b, 0xb4, 0x68, 0x75, 0xd2, 0xce, 0x20, 0xc6, 0xb1, 0x7f, 0x3e, - 0x0f, 0x63, 0x35, 0xbf, 0x19, 0xbf, 0x90, 0xbd, 0x64, 0xbc, 0x90, 0x5d, 0x4e, 0xbc, 0x90, 0x4d, - 0xe9, 0xb8, 0xdf, 0x7a, 0x0f, 0xfb, 0xb0, 0xde, 0xc3, 0xfe, 0x99, 0xc5, 0x66, 0xad, 0xb2, 0x56, - 0x17, 0x29, 0x72, 0x5f, 0x80, 0x51, 0x76, 0x20, 0x31, 0x57, 0x3b, 0xf9, 0x6c, 0xc4, 0xa2, 0xcf, - 0xaf, 0xc5, 0xc5, 0x58, 0xc7, 0x41, 0x57, 0xa1, 0x18, 0x12, 0x27, 0x68, 0x6c, 0xab, 0x33, 0x4e, - 0x3c, 0xaa, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0x50, 0x5a, 0x3e, 0x3b, 0xd9, 0xab, 0xde, - 0x1f, 0xbe, 0x45, 0xb2, 0xa3, 0xa3, 0xd9, 0xf7, 0x00, 0x75, 0xe3, 0x0f, 0x10, 0x12, 0xa9, 0x6c, - 0x86, 0x44, 0x2a, 0x75, 
0x85, 0x43, 0xfa, 0x13, 0x0b, 0x26, 0x6a, 0x7e, 0x93, 0x6e, 0xdd, 0x6f, - 0xa6, 0x7d, 0xaa, 0x47, 0x89, 0x1c, 0xee, 0x11, 0x25, 0xf2, 0xef, 0x5a, 0x30, 0x52, 0xf3, 0x9b, - 0xa7, 0xa0, 0x6d, 0x7f, 0xdd, 0xd4, 0xb6, 0x3f, 0x96, 0xb1, 0x24, 0x32, 0x14, 0xec, 0xbf, 0x98, - 0x87, 0x71, 0xda, 0x4f, 0x7f, 0x4b, 0xce, 0x92, 0x31, 0x22, 0xd6, 0x00, 0x23, 0x42, 0xd9, 0x5c, - 0xbf, 0xd5, 0xf2, 0xef, 0x27, 0x67, 0x6c, 0x85, 0x95, 0x62, 0x01, 0x45, 0xcf, 0x41, 0xb1, 0x1d, - 0x90, 0x3d, 0xd7, 0x17, 0xfc, 0xa3, 0xf6, 0x76, 0x51, 0x13, 0xe5, 0x58, 0x61, 0x50, 0xb9, 0x2b, - 0x74, 0xbd, 0x06, 0x91, 0x99, 0xa6, 0x0b, 0x2c, 0x19, 0x15, 0x0f, 0xff, 0xac, 0x95, 0x63, 0x03, - 0x0b, 0xdd, 0x83, 0x12, 0xfb, 0xcf, 0x4e, 0x94, 0xe3, 0x27, 0xcf, 0x11, 0x39, 0x17, 0x04, 0x01, - 0x1c, 0xd3, 0x42, 0xd7, 0x00, 0x22, 0x19, 0x22, 0x38, 0x14, 0x91, 0x6d, 0x14, 0xaf, 0xad, 0x82, - 0x07, 0x87, 0x58, 0xc3, 0x42, 0xcf, 0x42, 0x29, 0x72, 0xdc, 0xd6, 0x2d, 0xd7, 0x23, 0x21, 0x53, - 0x39, 0xe7, 0x65, 0x4a, 0x05, 0x51, 0x88, 0x63, 0x38, 0xe5, 0x75, 0x98, 0xdb, 0x37, 0x4f, 0xbd, - 0x55, 0x64, 0xd8, 0x8c, 0xd7, 0xb9, 0xa5, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x0a, 0xe7, 0x6a, 0x7e, - 0xb3, 0xe6, 0x07, 0xd1, 0x8a, 0x1f, 0xdc, 0x77, 0x82, 0xa6, 0x9c, 0xbf, 0xb2, 0x8c, 0xee, 0x4f, - 0xcf, 0x9e, 0x21, 0xbe, 0x33, 0x8d, 0xb8, 0xfd, 0x2f, 0x32, 0x6e, 0xe7, 0x98, 0xae, 0x1c, 0x0d, - 0x76, 0xef, 0xaa, 0x2c, 0x7b, 0xd7, 0x9d, 0x88, 0xa0, 0xdb, 0x2c, 0x33, 0x57, 0x7c, 0x05, 0x89, - 0xea, 0xcf, 0x68, 0x99, 0xb9, 0x62, 0x60, 0xea, 0x9d, 0x65, 0xd6, 0xb7, 0x7f, 0x2d, 0xcf, 0x4e, - 0xa3, 0x44, 0xd2, 0x39, 0xf4, 0x45, 0x98, 0x08, 0xc9, 0x2d, 0xd7, 0xeb, 0x3c, 0x90, 0x42, 0x78, - 0x0f, 0x67, 0x9c, 0xfa, 0xb2, 0x8e, 0xc9, 0x55, 0x79, 0x66, 0x19, 0x4e, 0x50, 0xa3, 0xf3, 0x14, - 0x74, 0xbc, 0x85, 0xf0, 0x4e, 0x48, 0x02, 0x91, 0xf4, 0x8c, 0xcd, 0x13, 0x96, 0x85, 0x38, 0x86, - 0xd3, 0x75, 0xc9, 0xfe, 0xac, 0xf9, 0x1e, 0xf6, 0xfd, 0x48, 0xae, 0x64, 0x96, 0x36, 0x47, 0x2b, - 0xc7, 0x06, 0x16, 0x5a, 0x01, 0x14, 0x76, 0xda, 0xed, 0x16, 0x7b, 0xce, 0x77, 0x5a, 0xd7, 0x03, - 0xbf, 0xd3, 0xe6, 0x6f, 0x9d, 0xf9, 0xc5, 0xf3, 0xf4, 0x0a, 0xab, 0x77, 0x41, 0x71, 0x4a, 0x0d, - 0x7a, 0xfa, 0x6c, 0x86, 0xec, 0x37, 0x5b, 0xdd, 0x79, 0xa1, 0x5e, 0xaf, 0xb3, 0x22, 0x2c, 0x61, - 0x74, 0x31, 0xb1, 0xe6, 0x39, 0xe6, 0x70, 0xbc, 0x98, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, 0x5a, 0x86, - 0x91, 0x70, 0x3f, 0x6c, 0x44, 0x22, 0x0e, 0x53, 0x46, 0xfa, 0xca, 0x3a, 0x43, 0xd1, 0x52, 0x2a, - 0xf0, 0x2a, 0x58, 0xd6, 0xb5, 0xbf, 0x87, 0x5d, 0x86, 0x2c, 0x45, 0x56, 0xd4, 0x09, 0x08, 0xda, - 0x85, 0xf1, 0x36, 0x9b, 0x72, 0x11, 0xc0, 0x59, 0xcc, 0xdb, 0x4b, 0x03, 0x4a, 0xb5, 0xf7, 0xe9, - 0x41, 0xa3, 0xb4, 0x4e, 0x4c, 0x5c, 0xa8, 0xe9, 0xe4, 0xb0, 0x49, 0xdd, 0xfe, 0xd7, 0xd3, 0xec, - 0xcc, 0xad, 0x73, 0x51, 0x75, 0x44, 0x18, 0x14, 0x0b, 0xbe, 0x7c, 0x36, 0x5b, 0x67, 0x12, 0x7f, - 0x91, 0x30, 0x4a, 0xc6, 0xb2, 0x2e, 0x7a, 0x8b, 0xbd, 0x4d, 0xf3, 0x83, 0xae, 0x5f, 0xa6, 0x62, - 0x8e, 0x65, 0x3c, 0x43, 0x8b, 0x8a, 0x58, 0x23, 0x82, 0x6e, 0xc1, 0xb8, 0xc8, 0xa8, 0x24, 0x94, - 0x62, 0x79, 0x43, 0xe9, 0x31, 0x8e, 0x75, 0xe0, 0x51, 0xb2, 0x00, 0x9b, 0x95, 0xd1, 0x16, 0x5c, - 0xd4, 0xd2, 0x0b, 0x5e, 0x0f, 0x1c, 0xf6, 0x5e, 0xe9, 0xb2, 0x4d, 0xa4, 0x9d, 0x9b, 0x4f, 0x1e, - 0x1e, 0x94, 0x2f, 0xae, 0xf7, 0x42, 0xc4, 0xbd, 0xe9, 0xa0, 0xdb, 0x70, 0x8e, 0xfb, 0xed, 0x55, - 0x88, 0xd3, 0x6c, 0xb9, 0x9e, 0x3a, 0x98, 0xf9, 0x3a, 0xbc, 0x70, 0x78, 0x50, 0x3e, 0xb7, 0x90, - 0x86, 0x80, 0xd3, 0xeb, 0xa1, 0xd7, 0xa1, 0xd4, 0xf4, 0x42, 0x31, 0x06, 0xc3, 0x46, 0xe6, 0xcc, - 0x52, 0x65, 0xad, 0xae, 0xbe, 0x3f, 0xfe, 0x83, 
0xe3, 0x0a, 0x68, 0x8b, 0x2b, 0xc6, 0x94, 0x1c, - 0x3a, 0x92, 0x9d, 0x25, 0x5d, 0x2c, 0x09, 0xc3, 0x73, 0x87, 0x6b, 0x84, 0x95, 0xe5, 0xab, 0xe1, - 0xd4, 0x63, 0x10, 0x46, 0x6f, 0x02, 0xa2, 0x8c, 0x9a, 0xdb, 0x20, 0x0b, 0x0d, 0x16, 0x47, 0x9b, - 0xe9, 0x11, 0x8b, 0x86, 0xa7, 0x04, 0xaa, 0x77, 0x61, 0xe0, 0x94, 0x5a, 0xe8, 0x06, 0x3d, 0xc8, - 0xf4, 0x52, 0x61, 0xc1, 0x2b, 0x99, 0xfb, 0x99, 0x0a, 0x69, 0x07, 0xa4, 0xe1, 0x44, 0xa4, 0x69, - 0x52, 0xc4, 0x89, 0x7a, 0xf4, 0x2e, 0x55, 0x29, 0x75, 0xc0, 0x0c, 0x96, 0xd1, 0x9d, 0x56, 0x87, - 0xca, 0xc5, 0xdb, 0x7e, 0x18, 0xad, 0x91, 0xe8, 0xbe, 0x1f, 0xec, 0x88, 0xd8, 0x64, 0x71, 0x98, - 0xcc, 0x18, 0x84, 0x75, 0x3c, 0xca, 0x07, 0xb3, 0xc7, 0xe1, 0x6a, 0x85, 0xbd, 0xd0, 0x15, 0xe3, - 0x7d, 0x72, 0x83, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xd5, 0xda, 0x12, 0x7b, 0x6d, 0x4b, 0xa0, 0x56, - 0x6b, 0x4b, 0x58, 0xc2, 0x11, 0xe9, 0xce, 0x4a, 0x3a, 0x91, 0xad, 0xd5, 0xec, 0xbe, 0x0e, 0x06, - 0x4c, 0x4c, 0xea, 0xc1, 0x94, 0xca, 0x87, 0xca, 0x83, 0xb6, 0x85, 0x33, 0x93, 0x6c, 0x91, 0x0c, - 0x1e, 0xf1, 0x4d, 0xe9, 0x89, 0xab, 0x09, 0x4a, 0xb8, 0x8b, 0xb6, 0x11, 0xbe, 0x64, 0xaa, 0x6f, - 0x4a, 0xa4, 0x79, 0x28, 0x85, 0x9d, 0x8d, 0xa6, 0xbf, 0xeb, 0xb8, 0x1e, 0x7b, 0x1c, 0xd3, 0x98, - 0xac, 0xba, 0x04, 0xe0, 0x18, 0x07, 0xad, 0x40, 0xd1, 0x91, 0x4a, 0x60, 0x94, 0x1d, 0xab, 0x40, - 0xa9, 0x7e, 0xb9, 0xfb, 0xae, 0x54, 0xfb, 0xaa, 0xba, 0xe8, 0x35, 0x18, 0x17, 0xde, 0x5a, 0x3c, - 0x82, 0x03, 0x7b, 0xbc, 0xd2, 0xcc, 0xf1, 0xeb, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x0b, 0x30, 0x41, - 0xa9, 0xc4, 0x07, 0xdb, 0xcc, 0xd9, 0x41, 0x4e, 0x44, 0x2d, 0xd5, 0x85, 0x5e, 0x19, 0x27, 0x88, - 0xa1, 0x26, 0x3c, 0xe1, 0x74, 0x22, 0x9f, 0x29, 0xd2, 0xcd, 0xf5, 0xbf, 0xee, 0xef, 0x10, 0x8f, - 0xbd, 0x61, 0x15, 0x17, 0x2f, 0x1f, 0x1e, 0x94, 0x9f, 0x58, 0xe8, 0x81, 0x87, 0x7b, 0x52, 0x41, - 0x77, 0x60, 0x34, 0xf2, 0x5b, 0xcc, 0x30, 0x9e, 0xb2, 0x12, 0xe7, 0xb3, 0xc3, 0xff, 0xac, 0x2b, - 0x34, 0x5d, 0x89, 0xa4, 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe7, 0x7b, 0x8c, 0x05, 0x46, 0x25, 0xe1, - 0xcc, 0x63, 0xd9, 0x03, 0xa3, 0xe2, 0xa7, 0x9a, 0x5b, 0x50, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x1d, - 0xa6, 0xdb, 0x81, 0xeb, 0xb3, 0x85, 0xad, 0x1e, 0x31, 0x66, 0xcc, 0xec, 0x06, 0xb5, 0x24, 0x02, - 0xee, 0xae, 0x43, 0x85, 0x4c, 0x59, 0x38, 0x73, 0x81, 0xa7, 0xca, 0xe2, 0x8c, 0x37, 0x2f, 0xc3, - 0x0a, 0x8a, 0x56, 0xd9, 0xb9, 0xcc, 0xc5, 0xc1, 0x99, 0xd9, 0xec, 0x18, 0x0f, 0xba, 0xd8, 0xc8, - 0xf9, 0x25, 0xf5, 0x17, 0xc7, 0x14, 0xe8, 0xbd, 0x11, 0x6e, 0x3b, 0x01, 0xa9, 0x05, 0x7e, 0x83, - 0xf0, 0xce, 0x70, 0x9b, 0xfc, 0xc7, 0x79, 0xfc, 0x46, 0x7a, 0x6f, 0xd4, 0xd3, 0x10, 0x70, 0x7a, - 0x3d, 0xd4, 0xd4, 0x32, 0x44, 0x53, 0x36, 0x34, 0x9c, 0x79, 0xa2, 0x87, 0x99, 0x51, 0x82, 0x67, - 0x8d, 0xd7, 0xa2, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x1d, 0x30, 0x25, 0xc2, 0x1d, 0xc5, 0xe3, - 0x7e, 0x31, 0xb6, 0x5f, 0xc4, 0x09, 0x18, 0xee, 0xc2, 0x9e, 0xfd, 0x76, 0x98, 0xee, 0xba, 0x71, - 0x8e, 0x15, 0x7c, 0xfc, 0x8f, 0x87, 0xa0, 0xa4, 0x94, 0xe9, 0x68, 0xde, 0x7c, 0x23, 0xb9, 0x90, - 0x7c, 0x23, 0x29, 0x52, 0x9e, 0x5e, 0x7f, 0x16, 0x59, 0x37, 0xcc, 0xea, 0x72, 0xd9, 0xa9, 0xbe, - 0x74, 0xae, 0xbc, 0xaf, 0x8b, 0x9e, 0xa6, 0x1b, 0xc9, 0x0f, 0xfc, 0xd8, 0x52, 0xe8, 0xa9, 0x6e, - 0x19, 0x30, 0xd3, 0x2e, 0x7a, 0x8a, 0x0a, 0x36, 0xcd, 0x6a, 0x2d, 0x99, 0x7a, 0xb2, 0x46, 0x0b, - 0x31, 0x87, 0x31, 0x01, 0x90, 0xb2, 0x47, 0x4c, 0x00, 0x1c, 0x79, 0x48, 0x01, 0x50, 0x12, 0xc0, - 0x31, 0x2d, 0xd4, 0x82, 0xe9, 0x86, 0x99, 0x35, 0x54, 0xb9, 0xe5, 0x3d, 0xd5, 0x37, 0x7f, 0x67, - 0x47, 0x4b, 0xd1, 0xb6, 0x94, 0xa4, 0x82, 0xbb, 0x09, 0xa3, 0xd7, 0xa0, 
0xf8, 0x9e, 0x1f, 0xb2, - 0xc5, 0x24, 0x78, 0x04, 0xe9, 0xbe, 0x54, 0x7c, 0xeb, 0x76, 0x9d, 0x95, 0x1f, 0x1d, 0x94, 0x47, - 0x6b, 0x7e, 0x53, 0xfe, 0xc5, 0xaa, 0x02, 0x7a, 0x00, 0xe7, 0x8c, 0x93, 0x55, 0x75, 0x17, 0x06, - 0xef, 0xee, 0x45, 0xd1, 0xdc, 0xb9, 0x6a, 0x1a, 0x25, 0x9c, 0xde, 0x00, 0x3d, 0xae, 0x3c, 0x5f, - 0x64, 0xdc, 0x95, 0x7c, 0x08, 0x63, 0x37, 0x4a, 0xba, 0xf3, 0x7a, 0x02, 0x01, 0x77, 0xd7, 0xb1, - 0x7f, 0x85, 0xbf, 0x3d, 0x08, 0x0d, 0x25, 0x09, 0x3b, 0xad, 0xd3, 0x48, 0xe8, 0xb4, 0x6c, 0x28, - 0x4f, 0x1f, 0xfa, 0x7d, 0xeb, 0x37, 0x2c, 0xf6, 0xbe, 0xb5, 0x4e, 0x76, 0xdb, 0x2d, 0x2a, 0x27, - 0x3f, 0xfa, 0x8e, 0xbf, 0x05, 0xc5, 0x48, 0xb4, 0xd6, 0x2b, 0x07, 0x95, 0xd6, 0x29, 0xf6, 0xc6, - 0xa7, 0x38, 0x14, 0x59, 0x8a, 0x15, 0x19, 0xfb, 0x9f, 0xf0, 0x19, 0x90, 0x90, 0x53, 0x50, 0x64, - 0x55, 0x4c, 0x45, 0x56, 0xb9, 0xcf, 0x17, 0x64, 0x28, 0xb4, 0xfe, 0xb1, 0xd9, 0x6f, 0x26, 0x0c, - 0x7e, 0xd4, 0x1f, 0x56, 0xed, 0x1f, 0xb1, 0xe0, 0x6c, 0x9a, 0x25, 0x12, 0xe5, 0x2a, 0xb9, 0x28, - 0xaa, 0x1e, 0x9a, 0xd5, 0x08, 0xde, 0x15, 0xe5, 0x58, 0x61, 0x0c, 0x9c, 0xde, 0xe1, 0x78, 0xf1, - 0xdd, 0x6e, 0xc3, 0x78, 0x2d, 0x20, 0xda, 0x1d, 0xf0, 0x06, 0xf7, 0x83, 0xe3, 0xfd, 0x79, 0xee, - 0xd8, 0x3e, 0x70, 0xf6, 0xcf, 0xe6, 0xe0, 0x2c, 0x7f, 0x29, 0x5a, 0xd8, 0xf3, 0xdd, 0x66, 0xcd, - 0x6f, 0x8a, 0xd4, 0x1c, 0x6f, 0xc3, 0x58, 0x5b, 0xd3, 0x1f, 0xf4, 0x8a, 0x30, 0xa5, 0xeb, 0x19, - 0x62, 0x39, 0x4e, 0x2f, 0xc5, 0x06, 0x2d, 0xd4, 0x84, 0x31, 0xb2, 0xe7, 0x36, 0xd4, 0x73, 0x43, - 0xee, 0xd8, 0x77, 0x83, 0x6a, 0x65, 0x59, 0xa3, 0x83, 0x0d, 0xaa, 0x8f, 0x20, 0x5b, 0x9b, 0xfd, - 0xa3, 0x16, 0x3c, 0x96, 0x11, 0x8f, 0x8a, 0x36, 0x77, 0x9f, 0xbd, 0xc9, 0x89, 0xc4, 0x4f, 0xaa, - 0x39, 0xfe, 0x52, 0x87, 0x05, 0x14, 0x7d, 0x0e, 0x80, 0xbf, 0xb4, 0x51, 0xb1, 0xa6, 0x5f, 0xe0, - 0x1e, 0x23, 0xe6, 0x88, 0x16, 0x2b, 0x42, 0xd6, 0xc7, 0x1a, 0x2d, 0xfb, 0xa7, 0xf2, 0x30, 0xc4, - 0x5e, 0x76, 0xd0, 0x0a, 0x8c, 0x6c, 0xf3, 0x08, 0xcd, 0x83, 0x04, 0x83, 0x8e, 0xe5, 0x43, 0x5e, - 0x80, 0x65, 0x65, 0xb4, 0x0a, 0x67, 0x78, 0x84, 0xeb, 0x56, 0x85, 0xb4, 0x9c, 0x7d, 0xa9, 0x66, - 0xe0, 0xc9, 0x92, 0x54, 0xdc, 0x8b, 0x6a, 0x37, 0x0a, 0x4e, 0xab, 0x87, 0xde, 0x80, 0x09, 0xca, - 0x97, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xb1, 0xad, 0x15, 0x23, 0xb8, 0x6e, 0x40, 0x71, 0x02, 0x9b, - 0x0a, 0x4c, 0xed, 0x2e, 0x85, 0xca, 0x50, 0x2c, 0x30, 0x99, 0x4a, 0x14, 0x13, 0x97, 0x99, 0x20, - 0x75, 0x98, 0xc1, 0xd5, 0xfa, 0x76, 0x40, 0xc2, 0x6d, 0xbf, 0xd5, 0x14, 0xb9, 0xb6, 0x63, 0x13, - 0xa4, 0x04, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0x36, 0x1d, 0xb7, 0xd5, 0x09, 0x48, 0x4c, 0x65, 0xd8, - 0xa4, 0xb2, 0x92, 0x80, 0xe3, 0xae, 0x1a, 0x74, 0x1d, 0x9d, 0x13, 0xc9, 0xaf, 0xa5, 0x37, 0xbe, - 0xb2, 0x2b, 0x1b, 0x91, 0x7e, 0x49, 0x3d, 0xc2, 0xd1, 0x08, 0xcb, 0x1b, 0x95, 0x3e, 0x5b, 0xd3, - 0x03, 0x0a, 0x8f, 0x24, 0x49, 0xe5, 0x61, 0x52, 0x30, 0xff, 0x9e, 0x05, 0x67, 0x52, 0xec, 0x57, - 0xf9, 0x51, 0xb5, 0xe5, 0x86, 0x91, 0x4a, 0x08, 0xa3, 0x1d, 0x55, 0xbc, 0x1c, 0x2b, 0x0c, 0xba, - 0x1f, 0xf8, 0x61, 0x98, 0x3c, 0x00, 0x85, 0x7d, 0x98, 0x80, 0x1e, 0xef, 0x00, 0x44, 0x97, 0xa1, - 0xd0, 0x09, 0x89, 0x0c, 0x24, 0xa5, 0xce, 0x6f, 0xa6, 0x19, 0x66, 0x10, 0xca, 0x9a, 0x6e, 0x29, - 0xa5, 0xac, 0xc6, 0x9a, 0x72, 0x4d, 0x2b, 0x87, 0xd9, 0x5f, 0xcd, 0xc3, 0x85, 0x4c, 0x4b, 0x75, - 0xda, 0xa5, 0x5d, 0xdf, 0x73, 0x23, 0x5f, 0xbd, 0x1a, 0xf2, 0x50, 0x26, 0xa4, 0xbd, 0xbd, 0x2a, - 0xca, 0xb1, 0xc2, 0x40, 0x57, 0x64, 0x1a, 0xf6, 0x64, 0xca, 0x9b, 0xc5, 0x8a, 0x91, 0x89, 0x7d, - 0xd0, 0x74, 0x62, 0x4f, 0x41, 0xa1, 0xed, 0xfb, 0xad, 0xe4, 0x61, 0x44, 0xbb, 0xeb, 0xfb, 0x2d, - 
0xcc, 0x80, 0xe8, 0x13, 0x62, 0x1c, 0x12, 0xcf, 0x64, 0xd8, 0x69, 0xfa, 0xa1, 0x36, 0x18, 0xcf, - 0xc0, 0xc8, 0x0e, 0xd9, 0x0f, 0x5c, 0x6f, 0x2b, 0xf9, 0x7c, 0x7a, 0x93, 0x17, 0x63, 0x09, 0x37, - 0x33, 0x3e, 0x8c, 0x9c, 0x74, 0x1e, 0xb0, 0x62, 0xdf, 0xab, 0xed, 0x07, 0xf2, 0x30, 0x89, 0x17, - 0x2b, 0xdf, 0x9a, 0x88, 0x3b, 0xdd, 0x13, 0x71, 0xd2, 0x79, 0xc0, 0xfa, 0xcf, 0xc6, 0x2f, 0x5a, - 0x30, 0xc9, 0xa2, 0x22, 0x8b, 0x00, 0x1a, 0xae, 0xef, 0x9d, 0x02, 0xeb, 0xf6, 0x14, 0x0c, 0x05, - 0xb4, 0xd1, 0x64, 0x72, 0x1f, 0xd6, 0x13, 0xcc, 0x61, 0xe8, 0x09, 0x28, 0xb0, 0x2e, 0xd0, 0xc9, - 0x1b, 0xe3, 0x79, 0x11, 0x2a, 0x4e, 0xe4, 0x60, 0x56, 0xca, 0xbc, 0xc2, 0x31, 0x69, 0xb7, 0x5c, - 0xde, 0xe9, 0xf8, 0x49, 0xe2, 0xa3, 0xe1, 0x15, 0x9e, 0xda, 0xb5, 0x0f, 0xe6, 0x15, 0x9e, 0x4e, - 0xb2, 0xb7, 0x58, 0xf4, 0xdf, 0x73, 0x70, 0x29, 0xb5, 0xde, 0xc0, 0x5e, 0xe1, 0xbd, 0x6b, 0x9f, - 0x8c, 0x15, 0x4c, 0xba, 0x71, 0x4a, 0xfe, 0x14, 0x8d, 0x53, 0x0a, 0x83, 0x72, 0x8e, 0x43, 0x03, - 0x38, 0x6b, 0xa7, 0x0e, 0xd9, 0x47, 0xc4, 0x59, 0x3b, 0xb5, 0x6f, 0x19, 0x62, 0xdd, 0x9f, 0xe6, - 0x32, 0xbe, 0x85, 0x09, 0x78, 0x57, 0xe9, 0x39, 0xc3, 0x80, 0xa1, 0xe0, 0x84, 0xc7, 0xf8, 0x19, - 0xc3, 0xcb, 0xb0, 0x82, 0x22, 0x57, 0x73, 0x7b, 0xce, 0x65, 0xa7, 0x7e, 0xcc, 0x6c, 0x6a, 0xce, - 0x7c, 0x41, 0x52, 0x43, 0x90, 0xe2, 0x02, 0xbd, 0xaa, 0x09, 0xe5, 0xf9, 0xc1, 0x85, 0xf2, 0xb1, - 0x74, 0x81, 0x1c, 0x2d, 0xc0, 0xe4, 0xae, 0xeb, 0xb1, 0x54, 0xfe, 0x26, 0x2b, 0xaa, 0xa2, 0x80, - 0xac, 0x9a, 0x60, 0x9c, 0xc4, 0x9f, 0x7d, 0x0d, 0xc6, 0x1f, 0x5e, 0x1d, 0xf9, 0x8d, 0x3c, 0x3c, - 0xde, 0x63, 0xdb, 0xf3, 0xb3, 0xde, 0x98, 0x03, 0xed, 0xac, 0xef, 0x9a, 0x87, 0x1a, 0x9c, 0xdd, - 0xec, 0xb4, 0x5a, 0xfb, 0xcc, 0xfe, 0x93, 0x34, 0x25, 0x86, 0xe0, 0x15, 0x9f, 0x90, 0x99, 0x28, - 0x56, 0x52, 0x70, 0x70, 0x6a, 0x4d, 0xf4, 0x26, 0x20, 0x5f, 0xe4, 0x9d, 0xbd, 0x4e, 0x3c, 0xa1, - 0x97, 0x67, 0x03, 0x9f, 0x8f, 0x37, 0xe3, 0xed, 0x2e, 0x0c, 0x9c, 0x52, 0x8b, 0x32, 0xfd, 0xf4, - 0x56, 0xda, 0x57, 0xdd, 0x4a, 0x30, 0xfd, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x87, 0x69, 0x67, - 0xcf, 0x71, 0x79, 0x74, 0x3c, 0x49, 0x80, 0x73, 0xfd, 0x4a, 0x09, 0xb6, 0x90, 0x44, 0xc0, 0xdd, - 0x75, 0x12, 0x8e, 0xd1, 0xc3, 0xd9, 0x8e, 0xd1, 0xbd, 0xcf, 0xc5, 0x7e, 0x3a, 0x5d, 0xfb, 0x3f, - 0x58, 0xf4, 0xfa, 0x4a, 0xc9, 0x1d, 0x4f, 0xc7, 0x41, 0xe9, 0x26, 0x35, 0x1f, 0xe5, 0x73, 0x9a, - 0x85, 0x47, 0x0c, 0xc4, 0x26, 0x2e, 0x5f, 0x10, 0x61, 0xec, 0x24, 0x63, 0xb0, 0xee, 0x22, 0xc6, - 0x81, 0xc2, 0x40, 0x9f, 0x87, 0x91, 0xa6, 0xbb, 0xe7, 0x86, 0x7e, 0x20, 0x36, 0xcb, 0x31, 0x5d, - 0x0d, 0xe2, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0x81, 0x1c, 0x8c, 0xcb, 0x16, 0xdf, - 0xea, 0xf8, 0x91, 0x73, 0x0a, 0xd7, 0xf2, 0x75, 0xe3, 0x5a, 0xfe, 0x44, 0xaf, 0x40, 0x0f, 0xac, - 0x4b, 0x99, 0xd7, 0xf1, 0xed, 0xc4, 0x75, 0xfc, 0x74, 0x7f, 0x52, 0xbd, 0xaf, 0xe1, 0x7f, 0x6a, - 0xc1, 0xb4, 0x81, 0x7f, 0x0a, 0xb7, 0xc1, 0x8a, 0x79, 0x1b, 0x3c, 0xd9, 0xf7, 0x1b, 0x32, 0x6e, - 0x81, 0xef, 0xcb, 0x27, 0xfa, 0xce, 0x4e, 0xff, 0xf7, 0xa0, 0xb0, 0xed, 0x04, 0xcd, 0x5e, 0x01, - 0x65, 0xbb, 0x2a, 0xcd, 0xdd, 0x70, 0x82, 0x26, 0x3f, 0xc3, 0x9f, 0x53, 0xd9, 0x2a, 0x9d, 0xa0, - 0xd9, 0xd7, 0x27, 0x8c, 0x35, 0x85, 0x5e, 0x85, 0xe1, 0xb0, 0xe1, 0xb7, 0x95, 0xc5, 0xe6, 0x65, - 0x9e, 0xc9, 0x92, 0x96, 0x1c, 0x1d, 0x94, 0x91, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f, 0xbd, 0x0d, - 0xe3, 0xec, 0x97, 0xb2, 0x5c, 0xc8, 0x67, 0xa7, 0x31, 0xa8, 0xeb, 0x88, 0xdc, 0x00, 0xc6, 0x28, - 0xc2, 0x26, 0xa9, 0xd9, 0x2d, 0x28, 0xa9, 0xcf, 0x7a, 0xa4, 0xbe, 0x3c, 0xff, 0x36, 0x0f, 0x67, - 0x52, 0xd6, 0x1c, 0x0a, 
0x8d, 0x99, 0x78, 0x61, 0xc0, 0xa5, 0xfa, 0x01, 0xe7, 0x22, 0x64, 0xd2, - 0x50, 0x53, 0xac, 0xad, 0x81, 0x1b, 0xbd, 0x13, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7f, 0xa3, 0xb4, - 0xb1, 0x53, 0x1b, 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x23, 0x9d, 0xd3, 0x3f, 0xca, 0xc3, 0xd9, 0xb4, - 0xd8, 0x33, 0xe8, 0xbb, 0x13, 0x29, 0x6d, 0x5e, 0x1a, 0x34, 0x6a, 0x0d, 0xcf, 0x73, 0x23, 0x12, - 0x34, 0xcf, 0x99, 0x49, 0x6e, 0xfa, 0x0e, 0xb3, 0x68, 0x93, 0x39, 0x80, 0x06, 0x3c, 0x15, 0x91, - 0x3c, 0x3e, 0x3e, 0x3d, 0x70, 0x07, 0x44, 0x0e, 0xa3, 0x30, 0xe1, 0x00, 0x2a, 0x8b, 0xfb, 0x3b, - 0x80, 0xca, 0x96, 0x67, 0x5d, 0x18, 0xd5, 0xbe, 0xe6, 0x91, 0xce, 0xf8, 0x0e, 0xbd, 0xad, 0xb4, - 0x7e, 0x3f, 0xd2, 0x59, 0xff, 0x51, 0x0b, 0x12, 0xe6, 0x91, 0x4a, 0xdd, 0x65, 0x65, 0xaa, 0xbb, - 0x2e, 0x43, 0x21, 0xf0, 0x5b, 0x24, 0x99, 0x41, 0x06, 0xfb, 0x2d, 0x82, 0x19, 0x84, 0x62, 0x44, - 0xb1, 0xb2, 0x63, 0x4c, 0x17, 0xe4, 0x84, 0x88, 0xf6, 0x14, 0x0c, 0xb5, 0xc8, 0x1e, 0x69, 0x25, - 0xc3, 0xb3, 0xdf, 0xa2, 0x85, 0x98, 0xc3, 0xec, 0x5f, 0x2c, 0xc0, 0xc5, 0x9e, 0x2e, 0xd4, 0x54, - 0x1c, 0xda, 0x72, 0x22, 0x72, 0xdf, 0xd9, 0x4f, 0xc6, 0x51, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0x33, - 0x8b, 0x71, 0x1e, 0x37, 0x31, 0xa1, 0x1c, 0x14, 0xe1, 0x12, 0x05, 0xf4, 0x11, 0x24, 0xa7, 0xbf, - 0x06, 0x10, 0x86, 0xad, 0x65, 0x8f, 0x72, 0x77, 0x4d, 0x61, 0x8a, 0x1e, 0xc7, 0xd7, 0xac, 0xdf, - 0x12, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x4c, 0xb5, 0x03, 0x3f, 0xe2, 0xba, 0xd6, 0x0a, 0x37, 0x14, - 0x1a, 0x32, 0xbd, 0x57, 0x6b, 0x09, 0x38, 0xee, 0xaa, 0x81, 0x5e, 0x86, 0x51, 0xe1, 0xd1, 0x5a, - 0xf3, 0xfd, 0x96, 0x50, 0x03, 0x29, 0xb3, 0x93, 0x7a, 0x0c, 0xc2, 0x3a, 0x9e, 0x56, 0x8d, 0x29, - 0x70, 0x47, 0x52, 0xab, 0x71, 0x25, 0xae, 0x86, 0x97, 0x88, 0x43, 0x55, 0x1c, 0x28, 0x0e, 0x55, - 0xac, 0x18, 0x2b, 0x0d, 0xfc, 0x66, 0x05, 0x7d, 0x55, 0x49, 0x3f, 0x57, 0x80, 0x33, 0x62, 0xe1, - 0x3c, 0xea, 0xe5, 0xf2, 0x88, 0x52, 0xe8, 0x7f, 0x6b, 0xcd, 0x9c, 0xf6, 0x9a, 0xf9, 0x41, 0x0b, - 0x4c, 0xf6, 0x0a, 0xfd, 0xb9, 0xcc, 0x40, 0xf4, 0x2f, 0x67, 0xb2, 0x6b, 0x4d, 0x79, 0x81, 0x7c, - 0xc0, 0x90, 0xf4, 0xf6, 0xbf, 0xb7, 0xe0, 0xc9, 0xbe, 0x14, 0xd1, 0x32, 0x94, 0x18, 0x0f, 0xa8, - 0x49, 0x67, 0x4f, 0x2b, 0x43, 0x42, 0x09, 0xc8, 0x60, 0x49, 0xe3, 0x9a, 0x68, 0xb9, 0x2b, 0xe2, - 0xff, 0x33, 0x29, 0x11, 0xff, 0xcf, 0x19, 0xc3, 0xf3, 0x90, 0x21, 0xff, 0x7f, 0x25, 0x0f, 0xc3, - 0x7c, 0xc5, 0x9f, 0x82, 0x18, 0xb6, 0x22, 0xf4, 0xb6, 0x3d, 0x22, 0x51, 0xf1, 0xbe, 0xcc, 0x55, - 0x9c, 0xc8, 0xe1, 0x6c, 0x82, 0xba, 0xad, 0x62, 0x0d, 0x2f, 0x9a, 0x33, 0xee, 0xb3, 0xd9, 0x84, - 0x62, 0x12, 0x38, 0x0d, 0xed, 0x76, 0xfb, 0x22, 0x40, 0xc8, 0xb2, 0xe5, 0x53, 0x1a, 0x22, 0xa6, - 0xd9, 0x27, 0x7b, 0xb4, 0x5e, 0x57, 0xc8, 0xbc, 0x0f, 0xf1, 0x4e, 0x57, 0x00, 0xac, 0x51, 0x9c, - 0x7d, 0x05, 0x4a, 0x0a, 0xb9, 0x9f, 0x16, 0x67, 0x4c, 0x67, 0x2e, 0x3e, 0x0b, 0x93, 0x89, 0xb6, - 0x8e, 0xa5, 0x04, 0xfa, 0x25, 0x0b, 0x26, 0x79, 0x97, 0x97, 0xbd, 0x3d, 0x71, 0xa6, 0xbe, 0x0f, - 0x67, 0x5b, 0x29, 0x67, 0x9b, 0x98, 0xd1, 0xc1, 0xcf, 0x42, 0xa5, 0xf4, 0x49, 0x83, 0xe2, 0xd4, - 0x36, 0xd0, 0x55, 0xba, 0x6e, 0xe9, 0xd9, 0xe5, 0xb4, 0x84, 0xf7, 0xd1, 0x18, 0x5f, 0xb3, 0xbc, - 0x0c, 0x2b, 0xa8, 0xfd, 0x3b, 0x16, 0x4c, 0xf3, 0x9e, 0xdf, 0x24, 0xfb, 0x6a, 0x87, 0x7f, 0x98, - 0x7d, 0x17, 0x49, 0x38, 0x72, 0x19, 0x49, 0x38, 0xf4, 0x4f, 0xcb, 0xf7, 0xfc, 0xb4, 0x9f, 0xb5, - 0x40, 0xac, 0xc0, 0x53, 0x10, 0xe5, 0xbf, 0xdd, 0x14, 0xe5, 0x67, 0xb3, 0x17, 0x75, 0x86, 0x0c, - 0xff, 0x27, 0x16, 0x4c, 0x71, 0x84, 0xf8, 0x2d, 0xf9, 0x43, 0x9d, 0x87, 0x41, 0xb2, 0xe9, 0xa9, - 0x14, 0xdb, 0xe9, 0x1f, 0x65, 0x4c, 0x56, 0xa1, 
0xe7, 0x64, 0x35, 0xe5, 0x06, 0x3a, 0x46, 0x26, - 0xc9, 0x63, 0x07, 0xb3, 0xb6, 0xff, 0xd0, 0x02, 0xc4, 0x9b, 0x31, 0xd8, 0x1f, 0xca, 0x54, 0xb0, - 0x52, 0xed, 0xba, 0x88, 0x8f, 0x1a, 0x05, 0xc1, 0x1a, 0xd6, 0x89, 0x0c, 0x4f, 0xc2, 0x20, 0x20, - 0xdf, 0xdf, 0x20, 0xe0, 0x18, 0x23, 0xfa, 0x7f, 0x0a, 0x90, 0x74, 0x07, 0x40, 0x77, 0x61, 0xac, - 0xe1, 0xb4, 0x9d, 0x0d, 0xb7, 0xe5, 0x46, 0x2e, 0x09, 0x7b, 0x59, 0x12, 0x2d, 0x69, 0x78, 0xe2, - 0xa9, 0x57, 0x2b, 0xc1, 0x06, 0x1d, 0x34, 0x07, 0xd0, 0x0e, 0xdc, 0x3d, 0xb7, 0x45, 0xb6, 0x98, - 0xc6, 0x81, 0xf9, 0x3b, 0x72, 0xf3, 0x18, 0x59, 0x8a, 0x35, 0x8c, 0x14, 0xd7, 0xb5, 0xfc, 0xa3, - 0x73, 0x5d, 0x2b, 0x1c, 0xd3, 0x75, 0x6d, 0x68, 0x20, 0xd7, 0x35, 0x0c, 0xe7, 0x25, 0x8b, 0x44, - 0xff, 0xaf, 0xb8, 0x2d, 0x22, 0xf8, 0x62, 0xee, 0x05, 0x39, 0x7b, 0x78, 0x50, 0x3e, 0x8f, 0x53, - 0x31, 0x70, 0x46, 0x4d, 0xf4, 0x39, 0x98, 0x71, 0x5a, 0x2d, 0xff, 0xbe, 0x1a, 0xb5, 0xe5, 0xb0, - 0xe1, 0xb4, 0xb8, 0xc6, 0x7e, 0x84, 0x51, 0x7d, 0xe2, 0xf0, 0xa0, 0x3c, 0xb3, 0x90, 0x81, 0x83, - 0x33, 0x6b, 0x27, 0x3c, 0xdf, 0x8a, 0x7d, 0x3d, 0xdf, 0x5e, 0x87, 0x52, 0x3b, 0xf0, 0x1b, 0xab, - 0x9a, 0x37, 0xce, 0x25, 0x96, 0xa7, 0x5e, 0x16, 0x1e, 0x1d, 0x94, 0xc7, 0xd5, 0x1f, 0x76, 0xc3, - 0xc7, 0x15, 0xec, 0x1d, 0x38, 0x53, 0x27, 0x81, 0xcb, 0x32, 0x60, 0x36, 0xe3, 0x0d, 0xbd, 0x0e, - 0xa5, 0x20, 0x71, 0x84, 0x0d, 0x14, 0x58, 0x49, 0x8b, 0xf2, 0x2b, 0x8f, 0xac, 0x98, 0x90, 0xfd, - 0xc7, 0x16, 0x8c, 0x08, 0x87, 0x86, 0x53, 0xe0, 0x9c, 0x16, 0x0c, 0x05, 0x76, 0x39, 0xfd, 0x98, - 0x67, 0x9d, 0xc9, 0x54, 0x5d, 0x57, 0x13, 0xaa, 0xeb, 0x27, 0x7b, 0x11, 0xe9, 0xad, 0xb4, 0xfe, - 0x9b, 0x79, 0x98, 0x30, 0x9d, 0x39, 0x4e, 0x61, 0x08, 0xd6, 0x60, 0x24, 0x14, 0x9e, 0x43, 0xb9, - 0x6c, 0xcb, 0xe9, 0xe4, 0x24, 0xc6, 0x66, 0x51, 0xc2, 0x57, 0x48, 0x12, 0x49, 0x75, 0x49, 0xca, - 0x3f, 0x42, 0x97, 0xa4, 0x7e, 0xfe, 0x34, 0x85, 0x93, 0xf0, 0xa7, 0xb1, 0xbf, 0xc6, 0xae, 0x1a, - 0xbd, 0xfc, 0x14, 0xb8, 0x90, 0xeb, 0xe6, 0xa5, 0x64, 0xf7, 0x58, 0x59, 0xa2, 0x53, 0x19, 0xdc, - 0xc8, 0x2f, 0x58, 0x70, 0x31, 0xe5, 0xab, 0x34, 0xd6, 0xe4, 0x39, 0x28, 0x3a, 0x9d, 0xa6, 0xab, - 0xf6, 0xb2, 0xf6, 0x8c, 0xb5, 0x20, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x30, 0x4d, 0x1e, 0xb4, 0x5d, - 0xfe, 0x8e, 0xa8, 0xdb, 0x2e, 0xe6, 0x79, 0x88, 0xd9, 0xe5, 0x24, 0x10, 0x77, 0xe3, 0x2b, 0x77, - 0xec, 0x7c, 0xa6, 0x3b, 0xf6, 0x3f, 0xb0, 0x60, 0x54, 0x74, 0xfb, 0x14, 0x46, 0xfb, 0x3b, 0xcc, - 0xd1, 0x7e, 0xbc, 0xc7, 0x68, 0x67, 0x0c, 0xf3, 0xdf, 0xce, 0xa9, 0xfe, 0xd6, 0xfc, 0x20, 0x1a, - 0x80, 0xe5, 0x79, 0x15, 0x8a, 0xed, 0xc0, 0x8f, 0xfc, 0x86, 0xdf, 0x12, 0x1c, 0xcf, 0x13, 0x71, - 0xb4, 0x00, 0x5e, 0x7e, 0xa4, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x9e, 0x1f, 0x44, 0x82, 0xcb, 0x88, - 0x47, 0xcf, 0x0f, 0x22, 0xcc, 0x20, 0xa8, 0x09, 0x10, 0x39, 0xc1, 0x16, 0x89, 0x68, 0x99, 0x08, - 0x3c, 0x92, 0x7d, 0x78, 0x74, 0x22, 0xb7, 0x35, 0xe7, 0x7a, 0x51, 0x18, 0x05, 0x73, 0x55, 0x2f, - 0xba, 0x1d, 0x70, 0x01, 0x4a, 0x73, 0xff, 0x57, 0xb4, 0xb0, 0x46, 0x57, 0xfa, 0x68, 0xb2, 0x36, - 0x86, 0xcc, 0x07, 0xf1, 0x35, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x0a, 0xbb, 0x4a, 0xd8, 0x00, 0x1d, - 0xcf, 0x33, 0xff, 0xeb, 0x45, 0x35, 0xb4, 0xec, 0x35, 0xac, 0xa2, 0xfb, 0xff, 0xf7, 0x3e, 0xb9, - 0x69, 0xc3, 0xba, 0x1f, 0x4d, 0x1c, 0x24, 0x00, 0x7d, 0x67, 0x97, 0x9d, 0xc4, 0xf3, 0x7d, 0xae, - 0x80, 0x63, 0x58, 0x46, 0xb0, 0xb0, 0xd7, 0x2c, 0x3c, 0x70, 0xb5, 0x26, 0x16, 0xb9, 0x16, 0xf6, - 0x5a, 0x00, 0x70, 0x8c, 0x83, 0xe6, 0x85, 0xf8, 0x5d, 0x30, 0x92, 0xdf, 0x49, 0xf1, 0x5b, 0x7e, - 0xbe, 0x26, 0x7f, 0xbf, 0x00, 0xa3, 0x2a, 0x09, 0x5e, 0x8d, 0xe7, 0x12, 
0x13, 0x61, 0x58, 0x96, - 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x75, 0x98, 0x0c, 0xb9, 0xee, 0x45, 0x45, 0xdb, 0xe3, 0x3a, 0xac, - 0x4f, 0x4a, 0xfb, 0x8a, 0xba, 0x09, 0x3e, 0x62, 0x45, 0xfc, 0xe8, 0x90, 0x8e, 0x96, 0x49, 0x12, - 0xe8, 0x0d, 0x98, 0x68, 0xe9, 0xe9, 0xe6, 0x6b, 0x42, 0xc5, 0xa5, 0xcc, 0x8f, 0x8d, 0x64, 0xf4, - 0x35, 0x9c, 0xc0, 0xa6, 0x9c, 0x92, 0x5e, 0x22, 0x22, 0x44, 0x3a, 0xde, 0x16, 0x09, 0x45, 0x0a, - 0x2f, 0xc6, 0x29, 0xdd, 0xca, 0xc0, 0xc1, 0x99, 0xb5, 0xd1, 0xab, 0x30, 0x26, 0x3f, 0x5f, 0x73, - 0x23, 0x8e, 0x8d, 0xdc, 0x35, 0x18, 0x36, 0x30, 0xd1, 0x7d, 0x38, 0x27, 0xff, 0xaf, 0x07, 0xce, - 0xe6, 0xa6, 0xdb, 0x10, 0x5e, 0xdc, 0xdc, 0xd3, 0x67, 0x41, 0xba, 0x0e, 0x2d, 0xa7, 0x21, 0x1d, - 0x1d, 0x94, 0x2f, 0x8b, 0x51, 0x4b, 0x85, 0xb3, 0x49, 0x4c, 0xa7, 0x8f, 0x56, 0xe1, 0xcc, 0x36, - 0x71, 0x5a, 0xd1, 0xf6, 0xd2, 0x36, 0x69, 0xec, 0xc8, 0x4d, 0xc4, 0x9c, 0x93, 0x35, 0xd3, 0xf0, - 0x1b, 0xdd, 0x28, 0x38, 0xad, 0x1e, 0x7a, 0x07, 0x66, 0xda, 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xaf, - 0xf9, 0x11, 0x33, 0xe9, 0x50, 0x39, 0xe4, 0x84, 0x17, 0xb3, 0x72, 0xcc, 0xae, 0x65, 0xe0, 0xe1, - 0x4c, 0x0a, 0xe8, 0x7d, 0x38, 0x97, 0x58, 0x0c, 0xc2, 0xa7, 0x72, 0x22, 0x3b, 0xde, 0x6e, 0x3d, - 0xad, 0x82, 0xf0, 0x91, 0x4c, 0x03, 0xe1, 0xf4, 0x26, 0x3e, 0x98, 0xa1, 0xcf, 0x7b, 0xb4, 0xb2, - 0xc6, 0x94, 0xa1, 0x2f, 0xc1, 0x98, 0xbe, 0x8a, 0xc4, 0x05, 0x73, 0x25, 0x9d, 0x67, 0xd1, 0x56, - 0x1b, 0x67, 0xe9, 0xd4, 0x8a, 0xd2, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0x48, 0xff, 0x3e, 0x74, 0x0b, - 0x8a, 0x8d, 0x96, 0x4b, 0xbc, 0xa8, 0x5a, 0xeb, 0x15, 0xf4, 0x63, 0x49, 0xe0, 0x88, 0x01, 0x13, - 0x01, 0x4a, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0xcf, 0x41, 0xb9, 0x4f, 0xb4, 0xdb, 0x84, 0x3e, - 0xda, 0x1a, 0x48, 0x1f, 0xbd, 0x20, 0x33, 0xe2, 0xad, 0x25, 0x84, 0xf4, 0x44, 0xb6, 0xbb, 0x58, - 0x54, 0x4f, 0xe2, 0x0f, 0x6c, 0x1f, 0xac, 0xab, 0xb4, 0x0b, 0x7d, 0x2d, 0xd7, 0x8d, 0xa7, 0xac, - 0xa1, 0xc1, 0x05, 0x91, 0xcc, 0x67, 0x09, 0xfb, 0x6b, 0x39, 0x38, 0xa7, 0x86, 0xf0, 0x9b, 0x77, - 0xe0, 0xee, 0x74, 0x0f, 0xdc, 0x09, 0x3c, 0xea, 0xd8, 0xb7, 0x61, 0x98, 0x07, 0x4d, 0x19, 0x80, - 0x01, 0x7a, 0xca, 0x8c, 0xb0, 0xa5, 0xae, 0x69, 0x23, 0xca, 0xd6, 0x5f, 0xb2, 0x60, 0x72, 0x7d, - 0xa9, 0x56, 0xf7, 0x1b, 0x3b, 0x24, 0x5a, 0xe0, 0x0c, 0x2b, 0x16, 0xfc, 0x8f, 0xf5, 0x90, 0x7c, - 0x4d, 0x1a, 0xc7, 0x74, 0x19, 0x0a, 0xdb, 0x7e, 0x18, 0x25, 0x5f, 0x7c, 0x6f, 0xf8, 0x61, 0x84, - 0x19, 0xc4, 0xfe, 0x5d, 0x0b, 0x86, 0x58, 0x1e, 0xd7, 0x7e, 0xc9, 0x85, 0x07, 0xf9, 0x2e, 0xf4, - 0x32, 0x0c, 0x93, 0xcd, 0x4d, 0xd2, 0x88, 0xc4, 0xac, 0x4a, 0x77, 0xd4, 0xe1, 0x65, 0x56, 0x4a, - 0x2f, 0x7d, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa5, 0xc8, 0xdd, 0x25, 0x0b, 0xcd, - 0xa6, 0x78, 0x33, 0x7b, 0x08, 0xef, 0xdf, 0x75, 0x49, 0x00, 0xc7, 0xb4, 0xec, 0xaf, 0xe6, 0x00, - 0x62, 0xd7, 0xff, 0x7e, 0x9f, 0xb8, 0xd8, 0xf5, 0x9a, 0x72, 0x25, 0xe5, 0x35, 0x05, 0xc5, 0x04, - 0x53, 0x9e, 0x52, 0xd4, 0x30, 0xe5, 0x07, 0x1a, 0xa6, 0xc2, 0x71, 0x86, 0x69, 0x09, 0xa6, 0xe3, - 0xd0, 0x05, 0x66, 0x1c, 0x17, 0x26, 0xa4, 0xac, 0x27, 0x81, 0xb8, 0x1b, 0xdf, 0x26, 0x70, 0x59, - 0x46, 0xd4, 0x94, 0x77, 0x0d, 0x33, 0xc9, 0x3c, 0x46, 0x9e, 0xe9, 0xf8, 0xb9, 0x28, 0x97, 0xf9, - 0x5c, 0xf4, 0x13, 0x16, 0x9c, 0x4d, 0xb6, 0xc3, 0x7c, 0xdf, 0xbe, 0x62, 0xc1, 0x39, 0xf6, 0x68, - 0xc6, 0x5a, 0xed, 0x7e, 0xa2, 0x7b, 0x29, 0x3d, 0xa4, 0x43, 0xef, 0x1e, 0xc7, 0x7e, 0xcf, 0xab, - 0x69, 0xa4, 0x71, 0x7a, 0x8b, 0xf6, 0x57, 0x2c, 0xb8, 0x90, 0x99, 0x3e, 0x08, 0x5d, 0x85, 0xa2, - 0xd3, 0x76, 0xb9, 0x46, 0x4a, 0xec, 0x77, 0x26, 0x3d, 0xd6, 0xaa, 0x5c, 0x1f, 0xa5, 0xa0, 0x2a, - 
0xad, 0x61, 0x2e, 0x33, 0xad, 0x61, 0xdf, 0x2c, 0x85, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0x9d, 0x06, - 0x38, 0x64, 0xde, 0x96, 0x59, 0x61, 0x8d, 0x60, 0xe6, 0x97, 0xb3, 0xfd, 0xbf, 0x44, 0x08, 0x73, - 0x75, 0xa9, 0x1b, 0x81, 0xcb, 0x0d, 0x5a, 0x76, 0x13, 0x04, 0xb4, 0x42, 0x98, 0xce, 0xaa, 0x7f, - 0x6f, 0xae, 0x01, 0x34, 0x19, 0xae, 0x96, 0x1b, 0x52, 0x5d, 0x21, 0x15, 0x05, 0xc1, 0x1a, 0x96, - 0xfd, 0x43, 0x39, 0x18, 0x95, 0xc1, 0xb3, 0x3b, 0xde, 0x20, 0x92, 0xe5, 0xb1, 0x72, 0xe8, 0xb0, - 0x64, 0xaa, 0x94, 0x70, 0x2d, 0x16, 0xc8, 0xe3, 0x64, 0xaa, 0x12, 0x80, 0x63, 0x1c, 0xf4, 0x0c, - 0x8c, 0x84, 0x9d, 0x0d, 0x86, 0x9e, 0x70, 0xe2, 0xa9, 0xf3, 0x62, 0x2c, 0xe1, 0xe8, 0x73, 0x30, - 0xc5, 0xeb, 0x05, 0x7e, 0xdb, 0xd9, 0xe2, 0xea, 0xcf, 0x21, 0xe5, 0x55, 0x3b, 0xb5, 0x9a, 0x80, - 0x1d, 0x1d, 0x94, 0xcf, 0x26, 0xcb, 0x98, 0xe2, 0xbc, 0x8b, 0x8a, 0xfd, 0x25, 0x40, 0xdd, 0xf1, - 0xc0, 0xd1, 0x9b, 0xdc, 0x94, 0xca, 0x0d, 0x48, 0xb3, 0x97, 0x46, 0x5c, 0x77, 0x02, 0x95, 0x86, - 0xf4, 0xbc, 0x16, 0x56, 0xf5, 0xed, 0xbf, 0x9a, 0x87, 0xa9, 0xa4, 0x4b, 0x20, 0xba, 0x01, 0xc3, - 0xfc, 0xb2, 0x13, 0xe4, 0x7b, 0x3c, 0xb8, 0x6a, 0x8e, 0x84, 0x6c, 0xdb, 0x8b, 0xfb, 0x52, 0xd4, - 0x47, 0xef, 0xc0, 0x68, 0xd3, 0xbf, 0xef, 0xdd, 0x77, 0x82, 0xe6, 0x42, 0xad, 0x2a, 0xd6, 0x65, - 0x2a, 0xcf, 0x5c, 0x89, 0xd1, 0x74, 0xe7, 0x44, 0xf6, 0xb8, 0x10, 0x83, 0xb0, 0x4e, 0x0e, 0xad, - 0xb3, 0x18, 0x87, 0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xee, 0x65, 0x57, 0xbb, 0x24, 0x91, 0x34, 0xca, - 0xe3, 0x22, 0x10, 0x22, 0x07, 0xe0, 0x98, 0x10, 0xfa, 0x6e, 0x38, 0x13, 0x66, 0xa8, 0xd9, 0xb2, - 0xd2, 0x43, 0xf4, 0xd2, 0x3c, 0x2d, 0x3e, 0x46, 0xa5, 0x99, 0x34, 0x85, 0x5c, 0x5a, 0x33, 0xf6, - 0x97, 0xcf, 0x80, 0xb1, 0x1b, 0x8d, 0x1c, 0x41, 0xd6, 0x09, 0xe5, 0x08, 0xc2, 0x50, 0x24, 0xbb, - 0xed, 0x68, 0xbf, 0xe2, 0x06, 0xbd, 0x72, 0xd8, 0x2d, 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, - 0x9d, 0xf4, 0x44, 0x4e, 0xf9, 0x0f, 0x31, 0x91, 0x53, 0xe1, 0x14, 0x13, 0x39, 0xad, 0xc1, 0xc8, - 0x96, 0x1b, 0x61, 0xd2, 0xf6, 0x05, 0x9b, 0x99, 0xba, 0x0e, 0xaf, 0x73, 0x94, 0xee, 0xe4, 0x21, - 0x02, 0x80, 0x25, 0x11, 0xf4, 0xa6, 0xda, 0x81, 0xc3, 0xd9, 0x52, 0x5a, 0xf7, 0xcb, 0x60, 0xea, - 0x1e, 0x14, 0x89, 0x9b, 0x46, 0x1e, 0x36, 0x71, 0xd3, 0x8a, 0x4c, 0xb7, 0x54, 0xcc, 0x36, 0x82, - 0x67, 0xd9, 0x94, 0xfa, 0x24, 0x59, 0x32, 0x12, 0x53, 0x95, 0x4e, 0x2e, 0x31, 0xd5, 0xf7, 0x5b, - 0x70, 0xae, 0x9d, 0x96, 0xa3, 0x4d, 0x24, 0x49, 0x7a, 0x79, 0xe0, 0x24, 0x74, 0x46, 0x83, 0x4c, - 0x5c, 0x4f, 0x45, 0xc3, 0xe9, 0xcd, 0xd1, 0x81, 0x0e, 0x36, 0x9a, 0x22, 0xb3, 0xd2, 0x53, 0x19, - 0x19, 0xae, 0x7a, 0xe4, 0xb5, 0x5a, 0x4f, 0xc9, 0xa6, 0xf4, 0xf1, 0xac, 0x6c, 0x4a, 0x03, 0xe7, - 0x50, 0x7a, 0x53, 0xe5, 0xb6, 0x1a, 0xcf, 0x5e, 0x4a, 0x3c, 0x73, 0x55, 0xdf, 0x8c, 0x56, 0x6f, - 0xaa, 0x8c, 0x56, 0x3d, 0x62, 0xbd, 0xf1, 0x7c, 0x55, 0x7d, 0xf3, 0x58, 0x69, 0xb9, 0xa8, 0x26, - 0x4f, 0x26, 0x17, 0x95, 0x71, 0xd5, 0xf0, 0x74, 0x48, 0xcf, 0xf6, 0xb9, 0x6a, 0x0c, 0xba, 0xbd, - 0x2f, 0x1b, 0x9e, 0x77, 0x6b, 0xfa, 0xa1, 0xf2, 0x6e, 0xdd, 0xd5, 0xf3, 0x58, 0xa1, 0x3e, 0x89, - 0x9a, 0x28, 0xd2, 0x80, 0xd9, 0xab, 0xee, 0xea, 0x17, 0xe0, 0x99, 0x6c, 0xba, 0xea, 0x9e, 0xeb, - 0xa6, 0x9b, 0x7a, 0x05, 0x76, 0x65, 0xc5, 0x3a, 0x7b, 0x3a, 0x59, 0xb1, 0xce, 0x9d, 0x78, 0x56, - 0xac, 0xf3, 0xa7, 0x90, 0x15, 0xeb, 0xb1, 0x0f, 0x35, 0x2b, 0xd6, 0xcc, 0x23, 0xc8, 0x8a, 0xb5, - 0x16, 0x67, 0xc5, 0xba, 0x90, 0x3d, 0x25, 0x29, 0x96, 0xb9, 0x19, 0xb9, 0xb0, 0xee, 0xb2, 0xe7, - 0x79, 0x1e, 0xb3, 0x42, 0x04, 0xa3, 0x4b, 0xcf, 0xfb, 0x9b, 0x16, 0xd8, 0x82, 0x4f, 0x89, 0x02, - 0xe1, 0x98, 0x14, 0xa5, 
0x1b, 0xe7, 0xc6, 0x7a, 0xbc, 0x87, 0x42, 0x36, 0x4d, 0xd5, 0x95, 0x9d, - 0x11, 0xcb, 0xfe, 0xcb, 0x39, 0xb8, 0xd4, 0x7b, 0x5d, 0xc7, 0x7a, 0xb2, 0x5a, 0xfc, 0xae, 0x93, - 0xd0, 0x93, 0x71, 0x21, 0x27, 0xc6, 0x1a, 0x38, 0xb0, 0xcf, 0x75, 0x98, 0x56, 0x26, 0xb9, 0x2d, - 0xb7, 0xb1, 0xaf, 0xe5, 0x03, 0x56, 0xae, 0x87, 0xf5, 0x24, 0x02, 0xee, 0xae, 0x83, 0x16, 0x60, - 0xd2, 0x28, 0xac, 0x56, 0x84, 0x30, 0xa3, 0x14, 0x73, 0x75, 0x13, 0x8c, 0x93, 0xf8, 0xf6, 0xcf, - 0x58, 0xf0, 0x58, 0x46, 0xc2, 0x88, 0x81, 0xe3, 0xd6, 0x6c, 0xc2, 0x64, 0xdb, 0xac, 0xda, 0x27, - 0xbc, 0x95, 0x91, 0x96, 0x42, 0xf5, 0x35, 0x01, 0xc0, 0x49, 0xa2, 0x8b, 0x57, 0x7f, 0xeb, 0xf7, - 0x2f, 0x7d, 0xec, 0xb7, 0x7f, 0xff, 0xd2, 0xc7, 0x7e, 0xe7, 0xf7, 0x2f, 0x7d, 0xec, 0xcf, 0x1f, - 0x5e, 0xb2, 0x7e, 0xeb, 0xf0, 0x92, 0xf5, 0xdb, 0x87, 0x97, 0xac, 0xdf, 0x39, 0xbc, 0x64, 0xfd, - 0xde, 0xe1, 0x25, 0xeb, 0xab, 0x7f, 0x70, 0xe9, 0x63, 0x6f, 0xe7, 0xf6, 0x5e, 0xf8, 0xff, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x20, 0x56, 0xf9, 0x8e, 0x0d, 0xe7, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index f76251d5..c05e2351 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -31,7 +31,7 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; option go_package = "v1"; // Represents a Persistent Disk resource in AWS. -// +// // An AWS EBS disk must exist before mounting to a container. The disk // must also be in the same AWS zone as the kubelet. An AWS EBS disk // can only be mounted as read/write once. AWS EBS volumes support @@ -161,7 +161,7 @@ message AzureFileVolumeSource { // Deprecated in 1.7, please use the bindings subresource of pods instead. message Binding { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -198,7 +198,7 @@ message CSIPersistentVolumeSource { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference controllerPublishSecretRef = 6; @@ -206,7 +206,7 @@ message CSIPersistentVolumeSource { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodeStageSecretRef = 7; @@ -214,10 +214,50 @@ message CSIPersistentVolumeSource { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. 
If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodePublishSecretRef = 8; + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + optional SecretReference controllerExpandSecretRef = 9; +} + +// Represents a source location of a volume to mount, managed by an external CSI driver +message CSIVolumeSource { + // Driver is the name of the CSI driver that handles this volume. + // Consult with your admin for the correct name as registered in the cluster. + optional string driver = 1; + + // Specifies a read-only configuration for the volume. + // Defaults to false (read/write). + // +optional + optional bool readOnly = 2; + + // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". + // If not provided, the empty value is passed to the associated CSI driver + // which will determine the default filesystem to apply. + // +optional + optional string fsType = 3; + + // VolumeAttributes stores driver-specific properties that are passed to the CSI + // driver. Consult your driver's documentation for supported values. + // +optional + map volumeAttributes = 4; + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secret references are passed. + // +optional + optional LocalObjectReference nodePublishSecretRef = 5; } // Adds and removes POSIX capabilities from running containers. @@ -235,7 +275,7 @@ message Capabilities { // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSPersistentVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -243,23 +283,23 @@ message CephFSPersistentVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. 
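(Editor's note, not part of the patch: the CSIVolumeSource message introduced above lets a pod mount an inline CSI volume without a separate PersistentVolume object. Purely as an illustration of how the vendored k8s.io/api/core/v1 types express this, the Go sketch below builds such a volume; the driver name, attribute keys, and secret name are hypothetical placeholders.)

package example

import (
	corev1 "k8s.io/api/core/v1"
)

// newInlineCSIVolume builds a pod volume backed by the new CSIVolumeSource.
// The driver name and volumeAttributes are placeholders; the CSI driver's own
// documentation defines the values it actually accepts.
func newInlineCSIVolume() corev1.Volume {
	fsType := "ext4"
	readOnly := false
	return corev1.Volume{
		Name: "inline-csi",
		VolumeSource: corev1.VolumeSource{
			CSI: &corev1.CSIVolumeSource{
				Driver:   "csi.example.com", // hypothetical driver name
				FSType:   &fsType,
				ReadOnly: &readOnly,
				VolumeAttributes: map[string]string{
					"size": "1Gi", // driver-specific, illustrative only
				},
				// Optional, per the field comment above; omit if the driver
				// needs no NodePublishVolume secret.
				NodePublishSecretRef: &corev1.LocalObjectReference{
					Name: "csi-node-publish-secret", // hypothetical secret
				},
			},
		},
	}
}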
- // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional SecretReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -268,7 +308,7 @@ message CephFSPersistentVolumeSource { // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -276,23 +316,23 @@ message CephFSVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -302,20 +342,20 @@ message CephFSVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderPersistentVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
- // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -330,20 +370,20 @@ message CinderPersistentVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -386,7 +426,7 @@ message ComponentCondition { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. message ComponentStatus { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -400,7 +440,7 @@ message ComponentStatus { // Status of all the conditions for the component as a list of ComponentStatus objects. message ComponentStatusList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -411,7 +451,7 @@ message ComponentStatusList { // ConfigMap holds configuration data for pods to consume. message ConfigMap { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -436,7 +476,7 @@ message ConfigMap { // ConfigMapEnvSource selects a ConfigMap to populate the environment // variables with. -// +// // The contents of the target ConfigMap's Data field will represent the // key-value pairs as environment variables. message ConfigMapEnvSource { @@ -456,14 +496,14 @@ message ConfigMapKeySelector { // The key to select. optional string key = 2; - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional optional bool optional = 3; } // ConfigMapList is a resource containing a list of ConfigMap objects. 
message ConfigMapList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -497,7 +537,7 @@ message ConfigMapNodeConfigSource { } // Adapts a ConfigMap into a projected volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names, // unless the items element is populated with specific mappings of keys to paths. @@ -516,13 +556,13 @@ message ConfigMapProjection { // +optional repeated KeyToPath items = 2; - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } // Adapts a ConfigMap into a volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // volume as files using the keys in the Data field as the file names, unless // the items element is populated with specific mappings of keys to paths. @@ -548,7 +588,7 @@ message ConfigMapVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -606,6 +646,9 @@ message Container { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. @@ -638,7 +681,7 @@ message Container { repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -658,6 +701,17 @@ message Container { // +optional optional Probe readinessProbe = 11; + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is an alpha feature enabled by the StartupProbe feature flag. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe startupProbe = 22; + // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -858,6 +912,13 @@ message ContainerStatus { // Container's ID in the format 'docker://'. // +optional optional string containerID = 8; + + // Specifies whether the container has passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered successful. + // Resets to false when the container is restarted, or if kubelet loses state temporarily. + // Is always true when no startupProbe is defined. + // +optional + optional bool started = 9; } // DaemonEndpoint contains information about a single Daemon endpoint. 
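(Editor's note, not part of the patch: the startupProbe field added to Container above holds off the other probes until the application has finished initializing, and the new ContainerStatus.started field reports when that probe has succeeded. As a rough sketch only, assuming the Probe and Handler shapes of the k8s.io/api/core/v1 version vendored here, a container that is given 30 periods of 10 seconds (300s total) to start before the normal liveness probe takes over could be built as follows; the image, path, and port are placeholders.)

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// slowStartingContainer allows up to 300s of startup time; once the startup
// probe succeeds, the liveness probe with its tighter failureThreshold applies.
func slowStartingContainer() corev1.Container {
	healthCheck := corev1.Handler{
		HTTPGet: &corev1.HTTPGetAction{
			Path: "/healthz",           // hypothetical endpoint
			Port: intstr.FromInt(8080), // hypothetical port
		},
	}
	return corev1.Container{
		Name:  "app",
		Image: "example/app:latest", // placeholder image
		StartupProbe: &corev1.Probe{
			Handler:          healthCheck,
			FailureThreshold: 30,
			PeriodSeconds:    10,
		},
		LivenessProbe: &corev1.Probe{
			Handler:          healthCheck,
			FailureThreshold: 3,
			PeriodSeconds:    10,
		},
	}
}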
@@ -958,7 +1019,8 @@ message EndpointAddress { // EndpointPort is a tuple that describes a single port. message EndpointPort { - // The name of this port (corresponds to ServicePort.Name). + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. // Must be a DNS_LABEL. // Optional only if one port is defined. // +optional @@ -1015,7 +1077,7 @@ message EndpointSubset { // ] message Endpoints { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -1033,7 +1095,7 @@ message Endpoints { // EndpointsList is a list of endpoints. message EndpointsList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1080,7 +1142,7 @@ message EnvVar { // EnvVarSource represents a source for the value of an EnvVar. message EnvVarSource { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. // +optional optional ObjectFieldSelector fieldRef = 1; @@ -1098,10 +1160,197 @@ message EnvVarSource { optional SecretKeySelector secretKeyRef = 4; } +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +message EphemeralContainer { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. + optional EphemeralContainerCommon ephemeralContainerCommon = 1; + + // If set, the name of the container from PodSpec that this ephemeral container targets. + // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + optional string targetContainerName = 2; +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. 
+message EphemeralContainerCommon { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + optional string name = 1; + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + optional string workingDir = 5; + + // Ports are not allowed for ephemeral containers. + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EnvVar env = 7; + + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. + // +optional + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + repeated VolumeMount volumeMounts = 9; + + // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe livenessProbe = 10; + + // Probes are not allowed for ephemeral containers. 
+ // +optional + optional Probe readinessProbe = 11; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe startupProbe = 22; + + // Lifecycle is not allowed for ephemeral containers. + // +optional + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + optional string terminationMessagePath = 13; + + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + optional string imagePullPolicy = 14; + + // SecurityContext is not allowed for ephemeral containers. + // +optional + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container process that reads from stdin will never receive an EOF. + // Default is false. + // +optional + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + optional bool tty = 18; +} + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +message EphemeralContainers { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 2; +} + // Event is a report of an event somewhere in the cluster. message Event { // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. @@ -1166,7 +1415,7 @@ message Event { // EventList is a list of events. message EventList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1184,6 +1433,7 @@ message EventSeries { optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; // State of this Series: Ongoing or Finished + // Deprecated. Planned removal for 1.18 optional string state = 3; } @@ -1314,7 +1564,7 @@ message FlockerVolumeSource { } // Represents a Persistent Disk resource in Google Compute Engine. -// +// // A GCE PD must exist before mounting to a container. The disk must // also be in the same GCE project and zone as the kubelet. A GCE PD // can only be mounted as read/write once or read-only many times. GCE @@ -1350,7 +1600,7 @@ message GCEPersistentDiskVolumeSource { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. -// +// // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. @@ -1370,20 +1620,44 @@ message GitRepoVolumeSource { optional string directory = 3; } +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +message GlusterfsPersistentVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + optional bool readOnly = 3; + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + optional string endpointsNamespace = 4; +} + // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; } @@ -1609,11 +1883,15 @@ message Lifecycle { // +optional optional Handler postStart = 1; - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. - // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. + // PreStop is called immediately before a container is terminated due to an + // API request or management event such as liveness/startup probe failure, + // preemption, resource contention, etc. The handler is not called if the + // container crashes or exits. The reason for termination is passed to the + // handler. The Pod's termination grace period countdown begins before the + // PreStop hook is executed. Regardless of the outcome of the handler, the + // container will eventually terminate within the Pod's termination grace + // period. Other management of the container blocks until the hook completes + // or until the termination grace period is reached. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional optional Handler preStop = 2; @@ -1622,12 +1900,12 @@ message Lifecycle { // LimitRange sets resource usage limits for each kind of resource in a Namespace. message LimitRange { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LimitRangeSpec spec = 2; } @@ -1662,7 +1940,7 @@ message LimitRangeItem { // LimitRangeList is a list of LimitRange items. message LimitRangeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1680,7 +1958,7 @@ message LimitRangeSpec { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1757,25 +2035,43 @@ message NFSVolumeSource { // Use of multiple namespaces is optional.
message Namespace { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NamespaceSpec spec = 2; // Status describes the current status of a Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NamespaceStatus status = 3; } +// NamespaceCondition contains details about state of namespace. +message NamespaceCondition { + // Type of namespace controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // +optional + optional string reason = 5; + + // +optional + optional string message = 6; +} + // NamespaceList is a list of Namespaces. message NamespaceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1798,25 +2094,31 @@ message NamespaceStatus { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional optional string phase = 1; + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated NamespaceCondition conditions = 2; } // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). message Node { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NodeSpec spec = 2; // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NodeStatus status = 3; } @@ -1944,7 +2246,7 @@ message NodeDaemonEndpoints { // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2012,6 +2314,13 @@ message NodeSpec { // +optional optional string podCIDR = 1; + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. + // +optional + // +patchStrategy=merge + repeated string podCIDRs = 7; + // ID of the node assigned by the cloud provider in the format: :// // +optional optional string providerID = 3; @@ -2064,6 +2373,9 @@ message NodeStatus { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // Note: This field is declared as mergeable, but the merge key is not sufficiently + // unique, which can cause data corruption when it is merged. Callers should instead + // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2146,7 +2458,7 @@ message ObjectFieldSelector { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message ObjectReference { // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 1; @@ -2170,7 +2482,7 @@ message ObjectReference { optional string apiVersion = 5; // Specific resourceVersion to which this reference is made, if any. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -2191,7 +2503,7 @@ message ObjectReference { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes message PersistentVolume { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2212,7 +2524,7 @@ message PersistentVolume { // PersistentVolumeClaim is a user's request for and claim to a persistent volume message PersistentVolumeClaim { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2256,7 +2568,7 @@ message PersistentVolumeClaimCondition { // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. message PersistentVolumeClaimList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2293,7 +2605,7 @@ message PersistentVolumeClaimSpec { // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional optional string volumeMode = 6; @@ -2350,7 +2662,7 @@ message PersistentVolumeClaimVolumeSource { // PersistentVolumeList is a list of PersistentVolume items. message PersistentVolumeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2384,9 +2696,9 @@ message PersistentVolumeSource { // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional - optional GlusterfsVolumeSource glusterfs = 4; + optional GlusterfsPersistentVolumeSource glusterfs = 4; // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs @@ -2394,7 +2706,7 @@ message PersistentVolumeSource { optional NFSVolumeSource nfs = 5; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDPersistentVolumeSource rbd = 6; @@ -2403,8 +2715,8 @@ message PersistentVolumeSource { // +optional optional ISCSIPersistentVolumeSource iscsi = 7; - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderPersistentVolumeSource cinder = 8; @@ -2457,11 +2769,11 @@ message PersistentVolumeSource { optional LocalVolumeSource local = 20; // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; - // CSI represents storage that handled by an external CSI driver (Beta feature). + // CSI represents storage that is handled by an external CSI driver (Beta feature). // +optional optional CSIPersistentVolumeSource csi = 22; } @@ -2509,7 +2821,7 @@ message PersistentVolumeSpec { // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. 
// +optional optional string volumeMode = 8; @@ -2551,12 +2863,12 @@ message PhotonPersistentDiskVolumeSource { // by clients and scheduled onto hosts. message Pod { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; @@ -2564,7 +2876,7 @@ message Pod { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodStatus status = 3; } @@ -2770,15 +3082,23 @@ message PodExecOptions { repeated string command = 6; } +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +message PodIP { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + optional string ip = 1; +} + // PodList is a list of Pods. message PodList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md repeated Pod items = 2; } @@ -2825,6 +3145,15 @@ message PodLogOptions { // slightly more or slightly less than the specified limit. // +optional optional int64 limitBytes = 8; + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). + // +optional + optional bool insecureSkipTLSVerifyBackend = 9; } // PodPortForwardOptions is the query options to a Pod's port forward call @@ -2865,6 +3194,12 @@ message PodSecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 1; + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional WindowsSecurityContextOptions windowsOptions = 8; + // The UID to run the entrypoint of the container process. 
// Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -2899,11 +3234,11 @@ message PodSecurityContext { // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: - // + // // 1. The owning GID will be the FSGroup // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) // 3. The permission bits are OR'd with rw-rw---- - // + // // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional optional int64 fsGroup = 5; @@ -2936,7 +3271,7 @@ message PodSpec { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -2956,6 +3291,16 @@ message PodSpec { // +patchStrategy=merge repeated Container containers = 2; + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 34; + // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -3039,7 +3384,6 @@ message PodSpec { // in the same pod, and the first process in each container will not be assigned PID 1. // HostPID and ShareProcessNamespace cannot both be set. // Optional: Default to false. - // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. // +k8s:conversion-gen=false // +optional optional bool shareProcessNamespace = 27; @@ -3114,7 +3458,7 @@ message PodSpec { // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md // +optional repeated PodReadinessGate readinessGates = 28; @@ -3122,10 +3466,47 @@ message PodSpec { // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. 
- // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md - // This is an alpha feature and may change in the future. + // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + // This is a beta feature as of Kubernetes v1.14. // +optional optional string runtimeClassName = 29; + + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // Optional: Defaults to true. + // +optional + optional bool enableServiceLinks = 30; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 31; + + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32; + + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // This field is alpha-level and is only honored by clusters that enable the EvenPodsSpread + // feature. + // All topologySpreadConstraints are ANDed. + // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + repeated TopologySpreadConstraint topologySpreadConstraints = 33; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3136,7 +3517,7 @@ message PodStatus { // The conditions array, the reason and message fields, and the individual container status // arrays contain more detail about the pod's status. // There are five possible phase values: - // + // // Pending: The pod has been accepted by the Kubernetes system, but one or more of the // container images has not been created. This includes time before being scheduled as // well as time spent downloading images over the network, which could take a while. @@ -3148,7 +3529,7 @@ message PodStatus { // by the system. // Unknown: For some reason the state of the pod could not be obtained, typically due to an // error in communicating with the host of the pod. - // + // // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase // +optional optional string phase = 1; @@ -3188,6 +3569,14 @@ message PodStatus { // +optional optional string podIP = 6; + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field.
Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + repeated PodIP podIPs = 12; + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional @@ -3210,12 +3599,17 @@ message PodStatus { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional optional string qosClass = 9; + + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // +optional + repeated ContainerStatus ephemeralContainerStatuses = 13; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded message PodStatusResult { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3223,7 +3617,7 @@ message PodStatusResult { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodStatus status = 2; } @@ -3231,12 +3625,12 @@ message PodStatusResult { // PodTemplate describes a template for creating copies of a predefined pod. message PodTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodTemplateSpec template = 2; } @@ -3244,7 +3638,7 @@ message PodTemplate { // PodTemplateList is a list of PodTemplates. message PodTemplateList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3255,12 +3649,12 @@ message PodTemplateList { // PodTemplateSpec describes the data a pod should have when created from a template message PodTemplateSpec { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; } @@ -3340,7 +3734,7 @@ message Probe { optional int32 periodSeconds = 4; // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. // +optional optional int32 successThreshold = 5; @@ -3389,17 +3783,22 @@ message QuobyteVolumeSource { // Default is no group // +optional optional string group = 5; + + // Tenant owning the given Quobyte volume in the Backend + // Used with dynamically provisioned Quobyte volumes, value is set by the plugin + // +optional + optional string tenant = 6; } // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. message RBDPersistentVolumeSource { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3412,32 +3811,32 @@ message RBDPersistentVolumeSource { // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional SecretReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3446,11 +3845,11 @@ message RBDPersistentVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDVolumeSource { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3463,32 +3862,32 @@ message RBDVolumeSource { // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3496,7 +3895,7 @@ message RBDVolumeSource { // RangeAllocation is not a public type. message RangeAllocation { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3511,12 +3910,12 @@ message RangeAllocation { message ReplicationController { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicationControllerSpec spec = 2; @@ -3524,7 +3923,7 @@ message ReplicationController { // This data may be out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicationControllerStatus status = 3; } @@ -3553,7 +3952,7 @@ message ReplicationControllerCondition { // ReplicationControllerList is a collection of replication controllers. message ReplicationControllerList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3639,17 +4038,17 @@ message ResourceFieldSelector { // ResourceQuota sets aggregate quota restrictions enforced per namespace message ResourceQuota { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ResourceQuotaSpec spec = 2; // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ResourceQuotaStatus status = 3; } @@ -3657,7 +4056,7 @@ message ResourceQuota { // ResourceQuotaList is a list of ResourceQuota items. message ResourceQuotaList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3853,7 +4252,7 @@ message ScopedResourceSelectorRequirement { // the Data field must be less than MaxSecretSize bytes. message Secret { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3879,7 +4278,7 @@ message Secret { // SecretEnvSource selects a Secret to populate the environment // variables with. -// +// // The contents of the target Secret's Data field will represent the // key-value pairs as environment variables. message SecretEnvSource { @@ -3899,7 +4298,7 @@ message SecretKeySelector { // The key of the secret to select from. Must be a valid secret key. optional string key = 2; - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional optional bool optional = 3; } @@ -3907,7 +4306,7 @@ message SecretKeySelector { // SecretList is a list of Secret. message SecretList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3917,7 +4316,7 @@ message SecretList { } // Adapts a secret into a projected volume. -// +// // The contents of the target Secret's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names. // Note that this is identical to a secret volume source without the default @@ -3953,7 +4352,7 @@ message SecretReference { } // Adapts a Secret into a volume. -// +// // The contents of the target Secret's Data field will be presented in a volume // as files using the keys in the Data field as the file names. // Secret volumes support ownership management and SELinux relabeling. @@ -3981,7 +4380,7 @@ message SecretVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the Secret or it's keys must be defined + // Specify whether the Secret or its keys must be defined // +optional optional bool optional = 4; } @@ -4008,6 +4407,12 @@ message SecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 3; + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional WindowsSecurityContextOptions windowsOptions = 10; + // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -4065,19 +4470,19 @@ message SerializedReference { // will answer requests sent through the proxy. message Service { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ServiceSpec spec = 2; // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ServiceStatus status = 3; } @@ -4088,7 +4493,7 @@ message Service { // * a set of secrets message ServiceAccount { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -4115,7 +4520,7 @@ message ServiceAccount { // ServiceAccountList is a list of ServiceAccount objects message ServiceAccountList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4153,7 +4558,7 @@ message ServiceAccountTokenProjection { // ServiceList holds a list of services. message ServiceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4164,8 +4569,9 @@ message ServiceList { // ServicePort contains information on service's port. message ServicePort { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. // Optional if only one ServicePort is defined on this service. // +optional optional string name = 1; @@ -4215,6 +4621,9 @@ message ServiceSpec { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +patchMergeKey=port // +patchStrategy=merge + // +listType=map + // +listMapKey=port + // +listMapKey=protocol repeated ServicePort ports = 1; // Route service traffic to pods with label keys and values matching this @@ -4251,7 +4660,7 @@ message ServiceSpec { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional optional string type = 4; @@ -4322,6 +4731,31 @@ message ServiceSpec { // sessionAffinityConfig contains the configurations of session affinity. // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + optional string ipFamily = 15; + + // topologyKeys is a preference-order list of topology keys which + // implementations of services should use to preferentially sort endpoints + // when accessing this Service, it can not be used at the same time as + // externalTrafficPolicy=Local. + // Topology keys must be valid label keys and at most 16 keys may be specified. 
+ // Endpoints are chosen based on the first topology key with available backends. + // If this field is specified and all entries have no backends that match + // the topology of the client, the service has no backends for that client + // and connections should fail. + // The special value "*" may be used to mean "any topology". This catch-all + // value, if used, only makes sense as the last value in the list. + // If this is not specified or empty, no topology constraints will be applied. + // +optional + repeated string topologyKeys = 16; } // ServiceStatus represents the current status of a service. @@ -4500,6 +4934,59 @@ message TopologySelectorTerm { repeated TopologySelectorLabelRequirement matchLabelExpressions = 1; } +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. +message TopologySpreadConstraint { + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + optional int32 maxSkew = 1; + + // TopologyKey is the key of node labels. Nodes that have a label with this key + // and identical values are considered to be in the same topology. + // We consider each as a "bucket", and try to put balanced number + // of pods into each bucket. + // It's a required field. + optional string topologyKey = 2; + + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + optional string whenUnsatisfiable = 3; + + // LabelSelector is used to find matching pods. + // Pods that match this label selector are counted to determine the number of pods + // in their corresponding topology domain. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; +} + // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. message TypedLocalObjectReference { @@ -4563,6 +5050,13 @@ message VolumeMount { // This field is beta in 1.10. 
// +optional optional string mountPropagation = 5; + + // Expanded path within the volume from which the container's volume should be mounted. + // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + // Defaults to "" (volume's root). + // SubPathExpr and SubPath are mutually exclusive. + // +optional + optional string subPathExpr = 6; } // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -4640,12 +5134,12 @@ message VolumeSource { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://examples.k8s.io/volumes/iscsi/README.md // +optional optional ISCSIVolumeSource iscsi = 8; // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; @@ -4656,7 +5150,7 @@ message VolumeSource { optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; @@ -4665,8 +5159,8 @@ message VolumeSource { // +optional optional FlexVolumeSource flexVolume = 12; - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderVolumeSource cinder = 13; @@ -4723,6 +5217,10 @@ message VolumeSource { // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional optional StorageOSVolumeSource storageos = 27; + + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + // +optional + optional CSIVolumeSource csi = 28; } // Represents a vSphere volume resource. @@ -4755,3 +5253,26 @@ message WeightedPodAffinityTerm { optional PodAffinityTerm podAffinityTerm = 2; } +// WindowsSecurityContextOptions contain Windows-specific options and credentials. +message WindowsSecurityContextOptions { + // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + optional string gmsaCredentialSpecName = 1; + + // GMSACredentialSpec is where the GMSA admission webhook + // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + // GMSA credential spec named by the GMSACredentialSpecName field. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + optional string gmsaCredentialSpec = 2; + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. 
If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
+  // +optional
+  optional string runAsUserName = 3;
+}
+
diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go
index 1aac0cb4..8da1ddea 100644
--- a/vendor/k8s.io/api/core/v1/register.go
+++ b/vendor/k8s.io/api/core/v1/register.go
@@ -88,6 +88,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&RangeAllocation{},
 		&ConfigMap{},
 		&ConfigMapList{},
+		&EphemeralContainers{},
 	)
 
 	// Add common types
diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go
index 7b606a30..db71bd2f 100644
--- a/vendor/k8s.io/api/core/v1/taint.go
+++ b/vendor/k8s.io/api/core/v1/taint.go
@@ -24,8 +24,14 @@ func (t *Taint) MatchTaint(taintToMatch *Taint) bool {
 	return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
 }
 
-// taint.ToString() converts taint struct to string in format key=value:effect or key:effect.
+// taint.ToString() converts taint struct to string in format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
 func (t *Taint) ToString() string {
+	if len(t.Effect) == 0 {
+		if len(t.Value) == 0 {
+			return fmt.Sprintf("%v", t.Key)
+		}
+		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
+	}
 	if len(t.Value) == 0 {
 		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
 	}
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index 1ed93772..47a40271 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -30,6 +30,8 @@ const (
 	NamespaceAll string = ""
 	// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
 	NamespaceNodeLease string = "kube-node-lease"
+	// TopologyKeyAny is the service topology key that matches any node
+	TopologyKeyAny string = "*"
 )
 
 // Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -87,11 +89,11 @@ type VolumeSource struct {
 	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
 	// ISCSI represents an ISCSI Disk resource that is attached to a
 	// kubelet's host machine and then exposed to the pod.
-	// More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
+	// More info: https://examples.k8s.io/volumes/iscsi/README.md
 	// +optional
 	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
 	// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
-	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
 	// +optional
 	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
 	// PersistentVolumeClaimVolumeSource represents a reference to a
@@ -100,15 +102,15 @@ type VolumeSource struct {
 	// +optional
 	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
 	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
-	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
+	// More info: https://examples.k8s.io/volumes/rbd/README.md
 	// +optional
 	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
 	// FlexVolume represents a generic volume resource that is
 	// provisioned/attached using an exec based plugin.
// +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -151,6 +153,9 @@ type VolumeSource struct { // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + // +optional + CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -189,23 +194,23 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. 
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -245,10 +250,10 @@ type PersistentVolumeSource struct { // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` - // CSI represents storage that handled by an external CSI driver (Beta feature). + // CSI represents storage that is handled by an external CSI driver (Beta feature). // +optional CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } @@ -272,7 +277,7 @@ const ( type PersistentVolume struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -326,7 +331,7 @@ type PersistentVolumeSpec struct { MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -387,7 +392,7 @@ type PersistentVolumeStatus struct { type PersistentVolumeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. @@ -402,7 +407,7 @@ type PersistentVolumeList struct { type PersistentVolumeClaim struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -424,7 +429,7 @@ type PersistentVolumeClaim struct { type PersistentVolumeClaimList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. @@ -455,7 +460,7 @@ type PersistentVolumeClaimSpec struct { StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` // This field requires the VolumeSnapshotDataSource alpha feature gate to be @@ -467,7 +472,7 @@ type PersistentVolumeClaimSpec struct { // In the future, we plan to support more data source types and the behavior // of the provisioner may change. // +optional - DataSource *TypedLocalObjectReference `json:"dataSource" protobuf:"bytes,7,opt,name=dataSource"` + DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -523,7 +528,7 @@ type PersistentVolumeClaimStatus struct { type PersistentVolumeAccessMode string const ( - // can be mounted read/write mode to exactly 1 host + // can be mounted in read/write mode to exactly 1 host ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" // can be mounted in read-only mode to many hosts ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" @@ -622,28 +627,52 @@ type EmptyDirVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsPersistentVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` + + // Path is the Glusterfs volume path. 
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` +} + // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -654,28 +683,28 @@ type RBDVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -684,10 +713,10 @@ type RBDVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDPersistentVolumeSource struct { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -698,28 +727,28 @@ type RBDPersistentVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -729,18 +758,18 @@ type RBDPersistentVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. 
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -754,18 +783,18 @@ type CinderVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderPersistentVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -778,26 +807,26 @@ type CinderPersistentVolumeSource struct { // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -817,26 +846,26 @@ type SecretReference struct { // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSPersistentVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -931,6 +960,11 @@ type QuobyteVolumeSource struct { // Default is no group // +optional Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` + + // Tenant owning the given Quobyte volume in the Backend + // Used with dynamically provisioned Quobyte volumes, value is set by the plugin + // +optional + Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"` } // FlexPersistentVolumeSource represents a generic persistent volume resource that is @@ -1062,7 +1096,7 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. 
// +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` - // Specify whether the Secret or it's keys must be defined + // Specify whether the Secret or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1488,7 +1522,7 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1515,7 +1549,7 @@ type ConfigMapProjection struct { // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the ConfigMap or it's keys must be defined + // Specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1640,7 +1674,7 @@ type CSIPersistentVolumeSource struct { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` @@ -1648,7 +1682,7 @@ type CSIPersistentVolumeSource struct { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` @@ -1656,10 +1690,50 @@ type CSIPersistentVolumeSource struct { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. 
+ // +optional + ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` +} + +// Represents a source location of a volume to mount, managed by an external CSI driver +type CSIVolumeSource struct { + // Driver is the name of the CSI driver that handles this volume. + // Consult with your admin for the correct name as registered in the cluster. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // Specifies a read-only configuration for the volume. + // Defaults to false (read/write). + // +optional + ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + + // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". + // If not provided, the empty value is passed to the associated CSI driver + // which will determine the default filesystem to apply. + // +optional + FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + + // VolumeAttributes stores driver-specific properties that are passed to the CSI + // driver. Consult your driver's documentation for supported values. + // +optional + VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"` + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secret references are passed. + // +optional + NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"` } // ContainerPort represents a network port in a single container. @@ -1708,6 +1782,12 @@ type VolumeMount struct { // This field is beta in 1.10. // +optional MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` + // Expanded path within the volume from which the container's volume should be mounted. + // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + // Defaults to "" (volume's root). + // SubPathExpr and SubPath are mutually exclusive. + // +optional + SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` } // MountPropagationMode describes mount propagation. @@ -1768,7 +1848,7 @@ type EnvVar struct { // EnvVarSource represents a source for the value of an EnvVar. type EnvVarSource struct { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. // +optional FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` // Selects a resource of the container: only resources limits and requests @@ -1810,7 +1890,7 @@ type ConfigMapKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key to select. 
Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the ConfigMap or it's key must be defined + // Specify whether the ConfigMap or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -1821,7 +1901,7 @@ type SecretKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -1946,7 +2026,7 @@ type Probe struct { // +optional PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. // +optional SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` // Minimum consecutive failures for the probe to be considered failed after having succeeded. @@ -1967,6 +2047,16 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// PreemptionPolicy describes a policy for if/when to preempt a pod. +type PreemptionPolicy string + +const ( + // PreemptLowerPriority means that pod can preempt other pods with lower priority. + PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority" + // PreemptNever means that pod never preempts other pods with lower priority. + PreemptNever PreemptionPolicy = "Never" +) + // TerminationMessagePolicy describes how termination messages are retrieved from a container. type TerminationMessagePolicy string @@ -2060,6 +2150,9 @@ type Container struct { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys @@ -2087,7 +2180,7 @@ type Container struct { // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -2104,6 +2197,16 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+	// when it might take a long time to load data or warm a cache, than during steady-state operation.
+	// This cannot be updated.
+	// This is an alpha feature enabled by the StartupProbe feature flag.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
 	// Actions that the management system should take in response to container lifecycle events.
 	// Cannot be updated.
 	// +optional
@@ -2189,11 +2292,15 @@ type Lifecycle struct {
 	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
 	// +optional
 	PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
-	// PreStop is called immediately before a container is terminated.
-	// The container is terminated after the handler completes.
-	// The reason for termination is passed to the handler.
-	// Regardless of the outcome of the handler, the container is eventually terminated.
-	// Other management of the container blocks until the hook completes.
+	// PreStop is called immediately before a container is terminated due to an
+	// API request or management event such as liveness/startup probe failure,
+	// preemption, resource contention, etc. The handler is not called if the
+	// container crashes or exits. The reason for termination is passed to the
+	// handler. The Pod's termination grace period countdown begins before the
+	// PreStop hooked is executed. Regardless of the outcome of the handler, the
+	// container will eventually terminate within the Pod's termination grace
+	// period. Other management of the container blocks until the hook completes
+	// or until the termination grace period is reached.
 	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
 	// +optional
 	PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
@@ -2294,6 +2401,12 @@ type ContainerStatus struct {
 	// Container's ID in the format 'docker://<container_id>'.
 	// +optional
 	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
+	// Specifies whether the container has passed its startup probe.
+	// Initialized as false, becomes true after startupProbe is considered successful.
+	// Resets to false when the container is restarted, or if kubelet loses state temporarily.
+	// Is always true when no startupProbe is defined.
+	// +optional
+	Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"`
 }
 
 // PodPhase is a label for the condition of a pod at the current time.
@@ -2324,18 +2437,22 @@ type PodConditionType string
 
 // These are valid conditions of pod.
 const (
-	// PodScheduled represents status of the scheduling process for this pod.
-	PodScheduled PodConditionType = "PodScheduled"
+	// ContainersReady indicates whether all containers in the pod are ready.
+	ContainersReady PodConditionType = "ContainersReady"
+	// PodInitialized means that all init containers in the pod have started successfully.
+	PodInitialized PodConditionType = "Initialized"
 	// PodReady means the pod is able to service requests and should be added to the
 	// load balancing pools of all matching services.
 	PodReady PodConditionType = "Ready"
-	// PodInitialized means that all init containers in the pod have started successfully.
- PodInitialized PodConditionType = "Initialized" + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" +) + +// These are reasons for a pod's transition to a condition. +const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" - // ContainersReady indicates whether all containers in the pod are ready. - ContainersReady PodConditionType = "ContainersReady" ) // PodCondition contains details for the current condition of this pod. @@ -2725,7 +2842,7 @@ type PodSpec struct { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -2743,6 +2860,15 @@ type PodSpec struct { // +patchMergeKey=name // +patchStrategy=merge Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -2816,7 +2942,6 @@ type PodSpec struct { // in the same pod, and the first process in each container will not be assigned PID 1. // HostPID and ShareProcessNamespace cannot both be set. // Optional: Default to false. - // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. // +k8s:conversion-gen=false // +optional ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"` @@ -2876,23 +3001,121 @@ type PodSpec struct { // configuration based on DNSPolicy. // +optional DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` - // If specified, all readiness gates will be evaluated for pod readiness. 
// A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md // +optional ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md - // This is an alpha feature and may change in the future. + // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + // This is a beta feature as of Kubernetes v1.14. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // Optional: Defaults to true. + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread + // feature. + // All topologySpreadConstraints are ANDed. 
+ // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` } +type UnsatisfiableConstraintAction string + +const ( + // DoNotSchedule instructs the scheduler not to schedule the pod + // when constraints are not satisfied. + DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" + // ScheduleAnyway instructs the scheduler to schedule the pod + // even if constraints are not satisfied. + ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" +) + +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. +type TopologySpreadConstraint struct { + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"` + // TopologyKey is the key of node labels. Nodes that have a label with this key + // and identical values are considered to be in the same topology. + // We consider each as a "bucket", and try to put balanced number + // of pods into each bucket. + // It's a required field. + TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"` + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"` + // LabelSelector is used to find matching pods. + // Pods that match this label selector are counted to determine the number of pods + // in their corresponding topology domain. 
+ // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"` +} + +const ( + // The default value for enableServiceLinks attribute. + DefaultEnableServiceLinks = true +) + // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the // pod's hosts file. type HostAlias struct { @@ -2913,6 +3136,11 @@ type PodSecurityContext struct { // takes precedence for that container. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -2998,6 +3226,175 @@ type PodDNSConfigOption struct { Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +type PodIP struct { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +type EphemeralContainerCommon struct { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // Ports are not allowed for ephemeral containers. + Ports []ContainerPort `json:"ports,omitempty" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Probes are not allowed for ephemeral containers. + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Lifecycle is not allowed for ephemeral containers. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. 
+ // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext is not allowed for ephemeral containers. + // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// EphemeralContainerCommon converts to Container. All fields must be kept in sync between +// these two types. +var _ = Container(EphemeralContainerCommon{}) + +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. 
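The `var _ = Container(EphemeralContainerCommon{})` assertion above compiles only while the two structs stay field-for-field identical. Below is a sketch of the conversion that identity makes possible; the helper name is ours and not part of the API.

package example

import corev1 "k8s.io/api/core/v1"

// asContainer is a hypothetical helper showing the conversion the compile-time
// assertion guarantees: it is legal only because EphemeralContainerCommon and
// Container declare the same fields in the same order.
func asContainer(ec corev1.EphemeralContainer) corev1.Container {
	return corev1.Container(ec.EphemeralContainerCommon)
}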
They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +type EphemeralContainer struct { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. + EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"` + + // If set, the name of the container from PodSpec that this ephemeral container targets. + // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"` +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system, especially if the node that hosts the pod cannot contact the control // plane. @@ -3053,6 +3450,14 @@ type PodStatus struct { // +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional @@ -3074,6 +3479,10 @@ type PodStatus struct { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // +optional + EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -3082,19 +3491,21 @@ type PodStatus struct { type PodStatusResult struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. 
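Following the dual-stack rule described above (the 0th podIPs entry must match podIP, with at most one address per IP family), a minimal sketch with made-up addresses:

package example

import corev1 "k8s.io/api/core/v1"

// examplePodStatus keeps PodIP and PodIPs consistent, as required above.
func examplePodStatus() corev1.PodStatus {
	return corev1.PodStatus{
		PodIP: "10.0.0.5", // made-up address
		PodIPs: []corev1.PodIP{
			{IP: "10.0.0.5"}, // 0th entry must equal PodIP
			{IP: "fd00::5"},  // at most one value per IP family
		},
	}
}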
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // +genclient +// +genclient:method=GetEphemeralContainers,verb=get,subresource=ephemeralcontainers,result=EphemeralContainers +// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers,input=EphemeralContainers,result=EphemeralContainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Pod is a collection of containers that can run on a host. This resource is created @@ -3102,12 +3513,12 @@ type PodStatusResult struct { type Pod struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3115,7 +3526,7 @@ type Pod struct { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3126,24 +3537,24 @@ type Pod struct { type PodList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -3155,12 +3566,12 @@ type PodTemplateSpec struct { type PodTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -3171,7 +3582,7 @@ type PodTemplate struct { type PodTemplateList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3273,8 +3684,8 @@ type ReplicationControllerCondition struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicationController represents the configuration of a replication controller. @@ -3283,12 +3694,12 @@ type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3296,7 +3707,7 @@ type ReplicationController struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3307,7 +3718,7 @@ type ReplicationController struct { type ReplicationControllerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3408,12 +3819,28 @@ type LoadBalancerIngress struct { Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` } +// IPFamily represents the IP Family (IPv4 or IPv6). This type is used +// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) +type IPFamily string + +const ( + // IPv4Protocol indicates that this IP is IPv4 protocol + IPv4Protocol IPFamily = "IPv4" + // IPv6Protocol indicates that this IP is IPv6 protocol + IPv6Protocol IPFamily = "IPv6" + // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service + MaxServiceTopologyKeys = 16 +) + // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +patchMergeKey=port // +patchStrategy=merge + // +listType=map + // +listMapKey=port + // +listMapKey=protocol Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` // Route service traffic to pods with label keys and values matching this @@ -3450,7 +3877,7 @@ type ServiceSpec struct { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` @@ -3517,16 +3944,43 @@ type ServiceSpec struct { // of peer discovery. // +optional PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"` + // sessionAffinityConfig contains the configurations of session affinity. // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. 
This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` + + // topologyKeys is a preference-order list of topology keys which + // implementations of services should use to preferentially sort endpoints + // when accessing this Service, it can not be used at the same time as + // externalTrafficPolicy=Local. + // Topology keys must be valid label keys and at most 16 keys may be specified. + // Endpoints are chosen based on the first topology key with available backends. + // If this field is specified and all entries have no backends that match + // the topology of the client, the service has no backends for that client + // and connections should fail. + // The special value "*" may be used to mean "any topology". This catch-all + // value, if used, only makes sense as the last value in the list. + // If this is not specified or empty, no topology constraints will be applied. + // +optional + TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` } // ServicePort contains information on service's port. type ServicePort struct { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. // Optional if only one ServicePort is defined on this service. // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -3569,19 +4023,19 @@ type ServicePort struct { type Service struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3598,7 +4052,7 @@ const ( type ServiceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
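A sketch of a ServiceSpec combining the ipFamily and topologyKeys fields documented above; the selector, port, and topology keys are illustrative, and "*" is only meaningful as the last entry. As noted above, topologyKeys cannot be combined with externalTrafficPolicy=Local.

package example

import corev1 "k8s.io/api/core/v1"

// exampleTopologyAwareService prefers node-local endpoints, then same-zone
// endpoints, then any endpoint, and requests an IPv4 clusterIP.
func exampleTopologyAwareService() corev1.ServiceSpec {
	ipv4 := corev1.IPv4Protocol
	return corev1.ServiceSpec{
		Selector: map[string]string{"app": "foo"}, // illustrative selector
		Ports: []corev1.ServicePort{
			{Name: "http", Port: 80, Protocol: corev1.ProtocolTCP},
		},
		IPFamily: &ipv4,
		TopologyKeys: []string{
			"kubernetes.io/hostname",      // assumed node label
			"topology.kubernetes.io/zone", // assumed zone label
			"*",                           // catch-all, only valid as the last entry
		},
	}
}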
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3616,7 +4070,7 @@ type ServiceList struct { type ServiceAccount struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3646,7 +4100,7 @@ type ServiceAccount struct { type ServiceAccountList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3673,7 +4127,7 @@ type ServiceAccountList struct { type Endpoints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3735,7 +4189,8 @@ type EndpointAddress struct { // EndpointPort is a tuple that describes a single port. type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. // Must be a DNS_LABEL. // Optional only if one port is defined. // +optional @@ -3757,7 +4212,7 @@ type EndpointPort struct { type EndpointsList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3770,6 +4225,14 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node. // +optional PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` + + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. + // +optional + // +patchStrategy=merge + PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` + // ID of the node assigned by the cloud provider in the format: :// // +optional ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` @@ -3788,7 +4251,7 @@ type NodeSpec struct { // Deprecated. Not all kubelets will set this field. Remove field after 1.13. 
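The podCIDRs field above follows the same pattern as podIPs: the 0th entry must match podCIDR and at most one range per IP family is allowed. A sketch with made-up ranges:

package example

import corev1 "k8s.io/api/core/v1"

// exampleNodeSpec keeps PodCIDR and PodCIDRs consistent, as required above.
func exampleNodeSpec() corev1.NodeSpec {
	return corev1.NodeSpec{
		PodCIDR: "10.244.1.0/24", // made-up range
		PodCIDRs: []string{
			"10.244.1.0/24",      // 0th entry must equal PodCIDR
			"fd00:10:244:1::/64", // at most one range per IP family
		},
	}
}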
// see: https://issues.k8s.io/61966 // +optional - DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` + DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` } // NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. @@ -3951,6 +4414,9 @@ type NodeStatus struct { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // Note: This field is declared as mergeable, but the merge key is not sufficiently + // unique, which can cause data corruption when it is merged. Callers should instead + // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -4050,9 +4516,6 @@ type NodeConditionType string const ( // NodeReady means kubelet is healthy and ready to accept pods. NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. NodeMemoryPressure NodeConditionType = "MemoryPressure" // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. @@ -4143,19 +4606,19 @@ type ResourceList map[ResourceName]resource.Quantity type Node struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4166,7 +4629,7 @@ type Node struct { type NodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4197,6 +4660,12 @@ type NamespaceStatus struct { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` + + // Represents the latest available observations of a namespace's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } type NamespacePhase string @@ -4209,6 +4678,42 @@ const ( NamespaceTerminating NamespacePhase = "Terminating" ) +const ( + // NamespaceTerminatingCause is returned as a defaults.cause item when a change is + // forbidden due to the namespace being terminated. + NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating" +) + +type NamespaceConditionType string + +// These are valid conditions of a namespace. +const ( + // NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery. + NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure" + // NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources. + NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure" + // NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types. + NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure" + // NamespaceContentRemaining contains information about resources remaining in a namespace. + NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining" + // NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace. + NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining" +) + +// NamespaceCondition contains details about state of namespace. +type NamespaceCondition struct { + // Type of namespace controller condition. + Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + // +genclient // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection @@ -4219,17 +4724,17 @@ const ( type Namespace struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace. 
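A sketch of how a caller might read the namespace conditions introduced above, for example to see whether deletion is blocked on remaining content; the helper is ours and not part of the API.

package example

import corev1 "k8s.io/api/core/v1"

// contentRemaining reports whether the NamespaceContentRemaining condition is
// currently True on the given namespace.
func contentRemaining(ns corev1.Namespace) bool {
	for _, cond := range ns.Status.Conditions {
		if cond.Type == corev1.NamespaceContentRemaining && cond.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}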
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4240,7 +4745,7 @@ type Namespace struct { type NamespaceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4256,7 +4761,7 @@ type NamespaceList struct { type Binding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4264,6 +4769,22 @@ type Binding struct { Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +type EphemeralContainers struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=ephemeralContainers"` +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. // +k8s:openapi-gen=false type Preconditions struct { @@ -4272,6 +4793,7 @@ type Preconditions struct { UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodLogOptions is the query options for a Pod's logs REST call. @@ -4312,8 +4834,18 @@ type PodLogOptions struct { // slightly more or slightly less than the specified limit. // +optional LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). 
+ // +optional + InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodAttachOptions is the query options to a Pod's remote attach call. @@ -4351,6 +4883,7 @@ type PodAttachOptions struct { Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodExecOptions is the query options to a Pod's remote exec call. @@ -4389,6 +4922,7 @@ type PodExecOptions struct { Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodPortForwardOptions is the query options to a Pod's port forward call @@ -4406,6 +4940,7 @@ type PodPortForwardOptions struct { Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodProxyOptions is the query options to a Pod's proxy call. @@ -4417,6 +4952,7 @@ type PodProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NodeProxyOptions is the query options to a Node's proxy call. @@ -4428,6 +4964,7 @@ type NodeProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ServiceProxyOptions is the query options to a Service's proxy call. @@ -4447,7 +4984,7 @@ type ServiceProxyOptions struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ObjectReference struct { // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // Namespace of the referent. @@ -4466,7 +5003,7 @@ type ObjectReference struct { // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` // Specific resourceVersion to which this reference is made, if any. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -4541,7 +5078,7 @@ const ( type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. 
@@ -4611,6 +5148,7 @@ type EventSeries struct { // Time of the last occurrence observed LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` // State of this Series: Ongoing or Finished + // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` } @@ -4628,7 +5166,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4688,12 +5226,12 @@ type LimitRangeSpec struct { type LimitRange struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -4704,7 +5242,7 @@ type LimitRange struct { type LimitRangeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4844,17 +5382,17 @@ type ResourceQuotaStatus struct { type ResourceQuota struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4865,7 +5403,7 @@ type ResourceQuota struct { type ResourceQuotaList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4882,7 +5420,7 @@ type ResourceQuotaList struct { type Secret struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4987,6 +5525,10 @@ const ( TLSCertKey = "tls.crt" // TLSPrivateKeyKey is the key for the private key field in a TLS secret. TLSPrivateKeyKey = "tls.key" + // SecretTypeBootstrapToken is used during the automated bootstrap process (first + // implemented by kubeadm). It stores tokens that are used to sign well known + // ConfigMaps. They are used for authn. + SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -4995,7 +5537,7 @@ const ( type SecretList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5011,7 +5553,7 @@ type SecretList struct { type ConfigMap struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5040,7 +5582,7 @@ type ConfigMap struct { type ConfigMapList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5082,7 +5624,7 @@ type ComponentCondition struct { type ComponentStatus struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5099,7 +5641,7 @@ type ComponentStatus struct { type ComponentStatusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5173,6 +5715,11 @@ type SecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. 
// +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -5210,7 +5757,7 @@ type SecurityContext struct { // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // +optional - ProcMount *ProcMountType `json:"procMount,omitEmpty" protobuf:"bytes,9,opt,name=procMount"` + ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"` } type ProcMountType string @@ -5243,13 +5790,36 @@ type SELinuxOptions struct { Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` } +// WindowsSecurityContextOptions contain Windows-specific options and credentials. +type WindowsSecurityContextOptions struct { + // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"` + + // GMSACredentialSpec is where the GMSA admission webhook + // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + // GMSA credential spec named by the GMSACredentialSpecName field. + // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. + // +optional + GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"` + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag. + // +optional + RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RangeAllocation is not a public type. type RangeAllocation struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
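A sketch of the Windows-specific options defined above set on a container-level SecurityContext; the credential spec name and user name are placeholders, and the WindowsGMSA / WindowsRunAsUserName feature gates mentioned in the comments must be enabled for the fields to be honored.

package example

import corev1 "k8s.io/api/core/v1"

// exampleWindowsSecurityContext sets a GMSA credential spec and a Windows
// run-as user on a container SecurityContext.
func exampleWindowsSecurityContext() corev1.SecurityContext {
	credSpec := "example-gmsa-credspec" // placeholder GMSA credential spec name
	user := "ContainerUser"             // placeholder Windows user
	return corev1.SecurityContext{
		WindowsOptions: &corev1.WindowsSecurityContextOptions{
			GMSACredentialSpecName: &credSpec,
			RunAsUserName:          &user,
		},
	}
}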
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index c781e545..441d3e10 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -108,7 +108,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { var map_Binding = map[string]string{ "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -123,15 +123,29 @@ var map_CSIPersistentVolumeSource = map[string]string{ "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", "volumeAttributes": "Attributes of the volume to publish.", - "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { return map_CSIPersistentVolumeSource } +var map_CSIVolumeSource = map[string]string{ + "": "Represents a source location of a volume to mount, managed by an external CSI driver", + "driver": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "readOnly": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", + "fsType": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "volumeAttributes": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", +} + +func (CSIVolumeSource) SwaggerDoc() map[string]string { + return map_CSIVolumeSource +} + var map_Capabilities = map[string]string{ "": "Adds and removes POSIX capabilities from running containers.", "add": "Added capabilities", @@ -144,12 +158,12 @@ func (Capabilities) SwaggerDoc() map[string]string { var map_CephFSPersistentVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -158,12 +172,12 @@ func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { @@ -172,9 +186,9 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string { var map_CinderPersistentVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. 
The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -184,9 +198,9 @@ func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CinderVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -217,7 +231,7 @@ func (ComponentCondition) SwaggerDoc() map[string]string { var map_ComponentStatus = map[string]string{ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "conditions": "List of component conditions observed", } @@ -227,7 +241,7 @@ func (ComponentStatus) SwaggerDoc() map[string]string { var map_ComponentStatusList = map[string]string{ "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ComponentStatus objects.", } @@ -237,7 +251,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { var map_ConfigMap = map[string]string{ "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.", } @@ -258,7 +272,7 @@ func (ConfigMapEnvSource) SwaggerDoc() map[string]string { var map_ConfigMapKeySelector = map[string]string{ "": "Selects a key from a ConfigMap.", "key": "The key to select.", - "optional": "Specify whether the ConfigMap or it's key must be defined", + "optional": "Specify whether the ConfigMap or its key must be defined", } func (ConfigMapKeySelector) SwaggerDoc() map[string]string { @@ -267,7 +281,7 @@ func (ConfigMapKeySelector) SwaggerDoc() map[string]string { var map_ConfigMapList = map[string]string{ "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ConfigMaps.", } @@ -291,7 +305,7 @@ func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { var map_ConfigMapProjection = map[string]string{ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the ConfigMap or it's keys must be defined", + "optional": "Specify whether the ConfigMap or its keys must be defined", } func (ConfigMapProjection) SwaggerDoc() map[string]string { @@ -302,7 +316,7 @@ var map_ConfigMapVolumeSource = map[string]string{ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the ConfigMap or it's keys must be defined", + "optional": "Specify whether the ConfigMap or its keys must be defined", } func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { @@ -321,9 +335,10 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -416,6 +431,7 @@ var map_ContainerStatus = map[string]string{ "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", "imageID": "ImageID of the container's image.", "containerID": "Container's ID in the format 'docker://'.", + "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -486,7 +502,7 @@ func (EndpointAddress) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort is a tuple that describes a single port.", - "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.", + "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", } @@ -508,7 +524,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { var map_Endpoints = map[string]string{ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. 
No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -518,7 +534,7 @@ func (Endpoints) SwaggerDoc() map[string]string { var map_EndpointsList = map[string]string{ "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -550,7 +566,7 @@ func (EnvVar) SwaggerDoc() map[string]string { var map_EnvVarSource = map[string]string{ "": "EnvVarSource represents a source for the value of an EnvVar.", - "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.", + "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", "configMapKeyRef": "Selects a key of a ConfigMap.", "secretKeyRef": "Selects a key of a secret in the pod's namespace", @@ -560,9 +576,57 @@ func (EnvVarSource) SwaggerDoc() map[string]string { return map_EnvVarSource } +var map_EphemeralContainer = map[string]string{ + "": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.", + "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", +} + +func (EphemeralContainer) SwaggerDoc() map[string]string { + return map_EphemeralContainer +} + +var map_EphemeralContainerCommon = map[string]string{ + "": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", + "name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", + "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "command": "Entrypoint array. Not executed within a shell. 
The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "Ports are not allowed for ephemeral containers.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", + "livenessProbe": "Probes are not allowed for ephemeral containers.", + "readinessProbe": "Probes are not allowed for ephemeral containers.", + "startupProbe": "Probes are not allowed for ephemeral containers.", + "lifecycle": "Lifecycle is not allowed for ephemeral containers.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. 
Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "securityContext": "SecurityContext is not allowed for ephemeral containers.", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (EphemeralContainerCommon) SwaggerDoc() map[string]string { + return map_EphemeralContainerCommon +} + +var map_EphemeralContainers = map[string]string{ + "": "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.", + "ephemeralContainers": "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.", +} + +func (EphemeralContainers) SwaggerDoc() map[string]string { + return map_EphemeralContainers +} + var map_Event = map[string]string{ "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", "message": "A human-readable description of the status of this operation.", @@ -585,7 +649,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of events.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of events", } @@ -597,7 +661,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time of the last occurrence observed", - "state": "State of this Series: Ongoing or Finished", + "state": "State of this Series: Ongoing or Finished Deprecated. 
Planned removal for 1.18", } func (EventSeries) SwaggerDoc() map[string]string { @@ -695,11 +759,23 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { return map_GitRepoVolumeSource } +var map_GlusterfsPersistentVolumeSource = map[string]string{ + "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", +} + +func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_GlusterfsPersistentVolumeSource +} + var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -812,7 +888,7 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. 
Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -821,8 +897,8 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (LimitRange) SwaggerDoc() map[string]string { @@ -845,7 +921,7 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", } @@ -913,18 +989,28 @@ func (NFSVolumeSource) SwaggerDoc() map[string]string { var map_Namespace = map[string]string{ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Namespace) SwaggerDoc() map[string]string { return map_Namespace } +var map_NamespaceCondition = map[string]string{ + "": "NamespaceCondition contains details about state of namespace.", + "type": "Type of namespace controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", +} + +func (NamespaceCondition) SwaggerDoc() map[string]string { + return map_NamespaceCondition +} + var map_NamespaceList = map[string]string{ "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", } @@ -942,8 +1028,9 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { } var map_NamespaceStatus = map[string]string{ - "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "conditions": "Represents the latest available observations of a namespace's current state.", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -952,9 +1039,9 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { var map_Node = map[string]string{ "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Node) SwaggerDoc() map[string]string { @@ -1027,7 +1114,7 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of nodes", } @@ -1086,6 +1173,7 @@ func (NodeSelectorTerm) SwaggerDoc() map[string]string { var map_NodeSpec = map[string]string{ "": "NodeSpec describes the attributes that a node is created with.", "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", + "podCIDRs": "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", @@ -1103,7 +1191,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1146,12 +1234,12 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", } @@ -1161,7 +1249,7 @@ func (ObjectReference) SwaggerDoc() map[string]string { var map_PersistentVolume = map[string]string{ "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", } @@ -1172,7 +1260,7 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1195,7 +1283,7 @@ func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1210,7 +1298,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.", "dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", } @@ -1242,7 +1330,7 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeList = map[string]string{ "": "PersistentVolumeList is a list of PersistentVolume items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", } @@ -1255,11 +1343,11 @@ var map_PersistentVolumeSource = map[string]string{ "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", @@ -1272,8 +1360,8 @@ var map_PersistentVolumeSource = map[string]string{ "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", - "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", - "csi": "CSI represents storage that handled by an external CSI driver (Beta feature).", + "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", + "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1288,7 +1376,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", - "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.", "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. 
This field influences the scheduling of pods that use this volume.", } @@ -1319,9 +1407,9 @@ func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { var map_Pod = map[string]string{ "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Pod) SwaggerDoc() map[string]string { @@ -1420,10 +1508,19 @@ func (PodExecOptions) SwaggerDoc() map[string]string { return map_PodExecOptions } +var map_PodIP = map[string]string{ + "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", + "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", +} + +func (PodIP) SwaggerDoc() map[string]string { + return map_PodIP +} + var map_PodList = map[string]string{ "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", } func (PodList) SwaggerDoc() map[string]string { @@ -1431,15 +1528,16 @@ func (PodList) SwaggerDoc() map[string]string { } var map_PodLogOptions = map[string]string{ - "": "PodLogOptions is the query options for a Pod's logs REST call.", - "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", - "follow": "Follow the log stream of the pod. Defaults to false.", - "previous": "Return previous terminated container logs. Defaults to false.", - "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. 
Only one of sinceSeconds or sinceTime may be specified.", - "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", - "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", - "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "": "PodLogOptions is the query options for a Pod's logs REST call.", + "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "Follow the log stream of the pod. Defaults to false.", + "previous": "Return previous terminated container logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", } func (PodLogOptions) SwaggerDoc() map[string]string { @@ -1476,6 +1574,7 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { var map_PodSecurityContext = map[string]string{ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "windowsOptions": "The Windows specific settings applied to all containers. 
If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -1500,8 +1599,9 @@ func (PodSignature) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", - "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + "ephemeralContainers": "List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", @@ -1514,7 +1614,7 @@ var map_PodSpec = map[string]string{ "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", - "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.", + "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.", "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", @@ -1526,8 +1626,12 @@ var map_PodSpec = map[string]string{ "priorityClassName": "If specified, indicates the pod's priority. 
\"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", + "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", + "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", + "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1535,18 +1639,20 @@ func (PodSpec) SwaggerDoc() map[string]string { } var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", - "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "The list has one entry per init container in the manifest. 
The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. 
This list is empty if no IPs have been allocated yet.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1555,8 +1661,8 @@ func (PodStatus) SwaggerDoc() map[string]string { var map_PodStatusResult = map[string]string{ "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodStatusResult) SwaggerDoc() map[string]string { @@ -1565,8 +1671,8 @@ func (PodStatusResult) SwaggerDoc() map[string]string { var map_PodTemplate = map[string]string{ "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodTemplate) SwaggerDoc() map[string]string { @@ -1575,7 +1681,7 @@ func (PodTemplate) SwaggerDoc() map[string]string { var map_PodTemplateList = map[string]string{ "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of pod templates", } @@ -1585,8 +1691,8 @@ func (PodTemplateList) SwaggerDoc() map[string]string { var map_PodTemplateSpec = map[string]string{ "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodTemplateSpec) SwaggerDoc() map[string]string { @@ -1636,11 +1742,11 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { } var map_Probe = map[string]string{ - "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", } @@ -1665,6 +1771,7 @@ var map_QuobyteVolumeSource = map[string]string{ "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", "user": "User to map volume access to Defaults to serivceaccount user", "group": "Group to map volume access to Default is no group", + "tenant": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", } func (QuobyteVolumeSource) SwaggerDoc() map[string]string { @@ -1673,14 +1780,14 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string { var map_RBDPersistentVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1689,14 +1796,14 @@ func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDVolumeSource) SwaggerDoc() map[string]string { @@ -1705,7 +1812,7 @@ func (RBDVolumeSource) SwaggerDoc() map[string]string { var map_RangeAllocation = map[string]string{ "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "range": "Range is string that identifies the range represented by 'data'.", "data": "Data is a bit array containing all allocated addresses in the previous segment.", } @@ -1716,9 +1823,9 @@ func (RangeAllocation) SwaggerDoc() map[string]string { var map_ReplicationController = map[string]string{ "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicationController) SwaggerDoc() map[string]string { @@ -1740,7 +1847,7 @@ func (ReplicationControllerCondition) SwaggerDoc() map[string]string { var map_ReplicationControllerList = map[string]string{ "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -1787,9 +1894,9 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ResourceQuota) SwaggerDoc() map[string]string { @@ -1798,7 +1905,7 @@ func (ResourceQuota) SwaggerDoc() map[string]string { var map_ResourceQuotaList = map[string]string{ "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", } @@ -1907,7 +2014,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. 
The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", "type": "Used to facilitate programmatic handling of secret data.", @@ -1929,7 +2036,7 @@ func (SecretEnvSource) SwaggerDoc() map[string]string { var map_SecretKeySelector = map[string]string{ "": "SecretKeySelector selects a key of a Secret.", "key": "The key of the secret to select from. Must be a valid secret key.", - "optional": "Specify whether the Secret or it's key must be defined", + "optional": "Specify whether the Secret or its key must be defined", } func (SecretKeySelector) SwaggerDoc() map[string]string { @@ -1938,7 +2045,7 @@ func (SecretKeySelector) SwaggerDoc() map[string]string { var map_SecretList = map[string]string{ "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", } @@ -1971,7 +2078,7 @@ var map_SecretVolumeSource = map[string]string{ "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the Secret or it's keys must be defined", + "optional": "Specify whether the Secret or its keys must be defined", } func (SecretVolumeSource) SwaggerDoc() map[string]string { @@ -1983,6 +2090,7 @@ var map_SecurityContext = map[string]string{ "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "windowsOptions": "The Windows specific settings applied to all containers. 
If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -2006,9 +2114,9 @@ func (SerializedReference) SwaggerDoc() map[string]string { var map_Service = map[string]string{ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Service) SwaggerDoc() map[string]string { @@ -2017,7 +2125,7 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. 
ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", @@ -2029,7 +2137,7 @@ func (ServiceAccount) SwaggerDoc() map[string]string { var map_ServiceAccountList = map[string]string{ "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", } @@ -2050,7 +2158,7 @@ func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of services", } @@ -2060,7 +2168,7 @@ func (ServiceList) SwaggerDoc() map[string]string { var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", - "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", @@ -2085,7 +2193,7 @@ var map_ServiceSpec = map[string]string{ "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/", "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. 
This field will be ignored if the cloud-provider does not support the feature.", @@ -2095,6 +2203,8 @@ var map_ServiceSpec = map[string]string{ "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", + "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.", + "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2201,7 +2311,7 @@ func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string { } var map_TopologySelectorTerm = map[string]string{ - "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. 
This is an alpha feature and may change in the future.", "matchLabelExpressions": "A list of topology selector requirements by labels.", } @@ -2209,6 +2319,18 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string { return map_TopologySelectorTerm } +var map_TopologySpreadConstraint = map[string]string{ + "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", + "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", +} + +func (TopologySpreadConstraint) SwaggerDoc() map[string]string { + return map_TopologySpreadConstraint +} + var map_TypedLocalObjectReference = map[string]string{ "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", @@ -2246,6 +2368,7 @@ var map_VolumeMount = map[string]string{ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", } func (VolumeMount) SwaggerDoc() map[string]string { @@ -2282,26 +2405,27 @@ var map_VolumeSource = map[string]string{ "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "secret": "Secret represents a secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "projected": "Items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "csi": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).", } func (VolumeSource) SwaggerDoc() map[string]string { @@ -2330,4 +2454,15 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { return map_WeightedPodAffinityTerm } +var map_WindowsSecurityContextOptions = map[string]string{ + "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", + "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.", +} + +func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { + return map_WindowsSecurityContextOptions +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go new file mode 100644 index 00000000..22aa55b9 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + LabelHostname = "kubernetes.io/hostname" + + LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" + LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + LabelZoneFailureDomainStable = "topology.kubernetes.io/zone" + LabelZoneRegionStable = "topology.kubernetes.io/region" + + LabelInstanceType = "beta.kubernetes.io/instance-type" + LabelInstanceTypeStable = "node.kubernetes.io/instance-type" + + LabelOSStable = "kubernetes.io/os" + LabelArchStable = "kubernetes.io/arch" + + // LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0. + // It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763) + LabelWindowsBuild = "node.kubernetes.io/windows-build" + + // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) + LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" + // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) + LabelNamespaceSuffixNode = "node.kubernetes.io" + + // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled + LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" + + // IsHeadlessService is added by Controller to an Endpoint denoting if its parent + // Service is Headless. The existence of this label can be used further by other + // controllers and kube-proxy to check if the Endpoint objects should be replicated when + // using Headless Services + IsHeadlessService = "service.kubernetes.io/headless" +) diff --git a/vendor/k8s.io/api/core/v1/well_known_taints.go b/vendor/k8s.io/api/core/v1/well_known_taints.go new file mode 100644 index 00000000..e3905192 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/well_known_taints.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // TaintNodeNotReady will be added when node is not ready + // and feature-gate for TaintBasedEvictions flag is enabled, + // and removed when node becomes ready. + TaintNodeNotReady = "node.kubernetes.io/not-ready" + + // TaintNodeUnreachable will be added when node becomes unreachable + // (corresponding to NodeReady status ConditionUnknown) + // and feature-gate for TaintBasedEvictions flag is enabled, + // and removed when node becomes reachable (NodeReady status ConditionTrue). 
+ TaintNodeUnreachable = "node.kubernetes.io/unreachable" + + // TaintNodeUnschedulable will be added when node becomes unschedulable + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when node becomes scheduable. + TaintNodeUnschedulable = "node.kubernetes.io/unschedulable" + + // TaintNodeMemoryPressure will be added when node has memory pressure + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when node has enough memory. + TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure" + + // TaintNodeDiskPressure will be added when node has disk pressure + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when node has enough disk. + TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure" + + // TaintNodeNetworkUnavailable will be added when node's network is unavailable + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when network becomes ready. + TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable" + + // TaintNodePIDPressure will be added when node has pid pressure + // and feature-gate for TaintNodesByCondition flag is enabled, + // and removed when node has enough disk. + TaintNodePIDPressure = "node.kubernetes.io/pid-pressure" +) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index f8f3471a..ac4855ab 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -237,6 +237,11 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } + if in.ControllerExpandSecretRef != nil { + in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef + *out = new(SecretReference) + **out = **in + } return } @@ -250,6 +255,44 @@ func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) { + *out = *in + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource. +func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource { + if in == nil { + return nil + } + out := new(CSIVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -442,7 +485,7 @@ func (in *ComponentStatus) DeepCopyObject() runtime.Object { func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ComponentStatus, len(*in)) @@ -567,7 +610,7 @@ func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ConfigMap, len(*in)) @@ -730,6 +773,11 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(Probe) (*in).DeepCopyInto(*out) } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle *out = new(Lifecycle) @@ -877,6 +925,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = *in in.State.DeepCopyInto(&out.State) in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + if in.Started != nil { + in, out := &in.Started, &out.Started + *out = new(bool) + **out = **in + } return } @@ -1123,7 +1176,7 @@ func (in *Endpoints) DeepCopyObject() runtime.Object { func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Endpoints, len(*in)) @@ -1235,6 +1288,139 @@ func (in *EnvVarSource) DeepCopy() *EnvVarSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) { + *out = *in + in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer. +func (in *EphemeralContainer) DeepCopy() *EphemeralContainer { + if in == nil { + return nil + } + out := new(EphemeralContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon. +func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon { + if in == nil { + return nil + } + out := new(EphemeralContainerCommon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainers) DeepCopyInto(out *EphemeralContainers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainers. +func (in *EphemeralContainers) DeepCopy() *EphemeralContainers { + if in == nil { + return nil + } + out := new(EphemeralContainers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EphemeralContainers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Event) DeepCopyInto(out *Event) { *out = *in @@ -1280,7 +1466,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) @@ -1498,6 +1684,27 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { + *out = *in + if in.EndpointsNamespace != nil { + in, out := &in.EndpointsNamespace, &out.EndpointsNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. +func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { *out = *in @@ -1816,7 +2023,7 @@ func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]LimitRange, len(*in)) @@ -1872,7 +2079,7 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -1997,7 +2204,7 @@ func (in *Namespace) DeepCopyInto(out *Namespace) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -2019,11 +2226,28 @@ func (in *Namespace) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition. +func (in *NamespaceCondition) DeepCopy() *NamespaceCondition { + if in == nil { + return nil + } + out := new(NamespaceCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Namespace, len(*in)) @@ -2076,6 +2300,13 @@ func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2252,7 +2483,7 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Node, len(*in)) @@ -2406,6 +2637,11 @@ func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { *out = *in + if in.PodCIDRs != nil { + in, out := &in.PodCIDRs, &out.PodCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Taints != nil { in, out := &in.Taints, &out.Taints *out = make([]Taint, len(*in)) @@ -2631,7 +2867,7 @@ func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondi func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolumeClaim, len(*in)) @@ -2757,7 +2993,7 @@ func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVo func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolume, len(*in)) @@ -2806,8 +3042,8 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in + *out = new(GlusterfsPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS @@ -3234,11 +3470,27 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodIP) DeepCopyInto(out *PodIP) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP. +func (in *PodIP) DeepCopy() *PodIP { + if in == nil { + return nil + } + out := new(PodIP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodList) DeepCopyInto(out *PodList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Pod, len(*in)) @@ -3390,6 +3642,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(SELinuxOptions) **out = **in } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -3478,6 +3735,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) @@ -3554,6 +3818,30 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(PreemptionPolicy) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3577,6 +3865,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PodIPs != nil { + in, out := &in.PodIPs, &out.PodIPs + *out = make([]PodIP, len(*in)) + copy(*out, *in) + } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime *out = (*in).DeepCopy() @@ -3595,6 +3888,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EphemeralContainerStatuses != nil { + in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3666,7 +3966,7 @@ func (in *PodTemplate) DeepCopyObject() runtime.Object { func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodTemplate, len(*in)) @@ -3978,7 +4278,7 @@ func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondi func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicationController, len(*in)) @@ -4134,7 +4434,7 @@ func (in *ResourceQuota) DeepCopyObject() runtime.Object { func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + 
in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceQuota, len(*in)) @@ -4454,7 +4754,7 @@ func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { func (in *SecretList) DeepCopyInto(out *SecretList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Secret, len(*in)) @@ -4579,6 +4879,11 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = new(SELinuxOptions) **out = **in } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -4721,7 +5026,7 @@ func (in *ServiceAccount) DeepCopyObject() runtime.Object { func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServiceAccount, len(*in)) @@ -4775,7 +5080,7 @@ func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjecti func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Service, len(*in)) @@ -4876,6 +5181,16 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(IPFamily) + **out = **in + } + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -5087,6 +5402,27 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { *out = *in @@ -5357,6 +5693,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(StorageOSVolumeSource) (*in).DeepCopyInto(*out) } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(CSIVolumeSource) + (*in).DeepCopyInto(*out) + } return } @@ -5402,3 +5743,34 @@ func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) { + *out = *in + if in.GMSACredentialSpecName != nil { + in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName + *out = new(string) + **out = **in + } + if in.GMSACredentialSpec != nil { + in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec + *out = new(string) + **out = **in + } + if in.RunAsUserName != nil { + in, out := &in.RunAsUserName, &out.RunAsUserName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions. +func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions { + if in == nil { + return nil + } + out := new(WindowsSecurityContextOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/doc.go b/vendor/k8s.io/api/discovery/v1alpha1/doc.go new file mode 100644 index 00000000..ffd6b0b5 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=discovery.k8s.io + +package v1alpha1 // import "k8s.io/api/discovery/v1alpha1" diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go new file mode 100644 index 00000000..fa4d3ac5 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go @@ -0,0 +1,1730 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } +func (*EndpointConditions) ProtoMessage() {} +func (*EndpointConditions) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{1} +} +func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointConditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointConditions.Merge(m, src) +} +func (m *EndpointConditions) XXX_Size() int { + return m.Size() +} +func (m *EndpointConditions) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointConditions.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{2} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } +func (*EndpointSlice) ProtoMessage() {} +func (*EndpointSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{3} +} +func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSlice.Merge(m, src) +} +func (m *EndpointSlice) XXX_Size() int { + return m.Size() +} +func (m *EndpointSlice) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo + +func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } +func (*EndpointSliceList) ProtoMessage() {} +func (*EndpointSliceList) Descriptor() ([]byte, []int) { + 
return fileDescriptor_772f83c5b34e07a5, []int{4} +} +func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSliceList.Merge(m, src) +} +func (m *EndpointSliceList) XXX_Size() int { + return m.Size() +} +func (m *EndpointSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Endpoint)(nil), "k8s.io.api.discovery.v1alpha1.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.discovery.v1alpha1.Endpoint.TopologyEntry") + proto.RegisterType((*EndpointConditions)(nil), "k8s.io.api.discovery.v1alpha1.EndpointConditions") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1alpha1.EndpointPort") + proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1alpha1.EndpointSlice") + proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1alpha1.EndpointSliceList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1/generated.proto", fileDescriptor_772f83c5b34e07a5) +} + +var fileDescriptor_772f83c5b34e07a5 = []byte{ + // 746 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a, + 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0x6e, 0x3b, 0xba, 0x8b, 0x28, 0xf7, 0x5e, 0x3b, 0xca, + 0x5d, 0x10, 0xa9, 0x30, 0x26, 0x15, 0x45, 0x15, 0xac, 0x6a, 0x28, 0x0f, 0x89, 0x47, 0x18, 0xba, + 0x40, 0x88, 0x05, 0x13, 0x7b, 0xea, 0x98, 0x24, 0x1e, 0xcb, 0x9e, 0x44, 0xca, 0x8e, 0x9f, 0xc0, + 0x0f, 0x62, 0x89, 0x50, 0x97, 0x5d, 0x76, 0x65, 0x51, 0xf7, 0x5f, 0x74, 0x85, 0x66, 0xfc, 0x4a, + 0x09, 0x8f, 0xec, 0x66, 0xbe, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xe6, 0x1c, 0xf0, 0x68, 0x7c, 0x10, + 0x23, 0x9f, 0x59, 0xe3, 0xd9, 0x90, 0x46, 0x01, 0xe5, 0x34, 0xb6, 0xe6, 0x34, 0x70, 0x59, 0x64, + 0xe5, 0x0f, 0x24, 0xf4, 0x2d, 0xd7, 0x8f, 0x1d, 0x36, 0xa7, 0xd1, 0xc2, 0x9a, 0xf7, 0xc9, 0x24, + 0x1c, 0x91, 0xbe, 0xe5, 0xd1, 0x80, 0x46, 0x84, 0x53, 0x17, 0x85, 0x11, 0xe3, 0x0c, 0xfe, 0x97, + 0x85, 0x23, 0x12, 0xfa, 0xa8, 0x0c, 0x47, 0x45, 0x78, 0xfb, 0x96, 0xe7, 0xf3, 0xd1, 0x6c, 0x88, + 0x1c, 0x36, 0xb5, 0x3c, 0xe6, 0x31, 0x4b, 0xb2, 0x86, 0xb3, 0x13, 0x79, 0x93, 0x17, 0x79, 0xca, + 0xd4, 0xda, 0xdd, 0xa5, 0xe4, 0x0e, 0x8b, 0xa8, 0x35, 0x5f, 0xc9, 0xd8, 0xbe, 0x53, 0xc5, 0x4c, + 0x89, 0x33, 0xf2, 0x03, 0x51, 0x5f, 0x38, 0xf6, 0x04, 0x10, 0x5b, 0x53, 0xca, 0xc9, 0xcf, 0x58, + 0xd6, 0xaf, 0x58, 0xd1, 0x2c, 0xe0, 0xfe, 0x94, 0xae, 0x10, 0xee, 0xfe, 0x89, 0x10, 0x3b, 0x23, + 0x3a, 0x25, 0x3f, 0xf2, 0xba, 0x9f, 0xeb, 0x40, 0x3b, 0x0a, 0xdc, 0x90, 0xf9, 0x01, 0x87, 0xbb, + 0x40, 0x27, 0xae, 0x1b, 0xd1, 0x38, 0xa6, 0x71, 0x4b, 0xe9, 0xd4, 0x7b, 0xba, 0xdd, 0x4c, 0x13, + 0x53, 0x3f, 0x2c, 0x40, 0x5c, 0xbd, 0x43, 0x0a, 0x80, 0xc3, 0x02, 0xd7, 0xe7, 0x3e, 0x0b, 0xe2, + 0xd6, 0x46, 0x47, 0xe9, 0x35, 0xf6, 0xfa, 0xe8, 0xb7, 0xfe, 0xa2, 0x22, 0xd3, 0x83, 0x92, 0x68, + 0xc3, 0xd3, 0xc4, 0xac, 0xa5, 0x89, 0x09, 0x2a, 0x0c, 0x2f, 0x09, 0xc3, 0x1e, 0xd0, 0x46, 0x2c, + 0xe6, 0x01, 0x99, 0xd2, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x7b, 0x2b, 0x4d, 0x4c, 0xed, 0x49, 0x8e, + 0xe1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x49, 0xe4, 0x51, 
0x8e, 0xe9, 0x49, 0x6b, 0x53, 0xd6, 0xf3, + 0xff, 0x72, 0x3d, 0xe2, 0x87, 0xd0, 0xbc, 0x8f, 0x5e, 0x0e, 0x3f, 0x50, 0x47, 0x04, 0xd1, 0x88, + 0x06, 0x0e, 0xcd, 0x5a, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, + 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x36, 0x88, 0x8e, 0x73, 0xde, 0x51, + 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x37, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x41, 0xf3, 0x5a, + 0x30, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0xd1, 0x2c, 0x16, 0x47, 0xf8, 0x37, 0x50, 0xe7, + 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0x97, 0x7b, 0x1b, 0x07, 0x4a, 0x77, 0x1f, 0xc0, 0x55, + 0x4f, 0xa1, 0x09, 0xd4, 0x88, 0x12, 0x37, 0xd3, 0xd0, 0x6c, 0x3d, 0x4d, 0x4c, 0x15, 0x0b, 0x00, + 0x67, 0x78, 0xf7, 0xab, 0x02, 0xb6, 0x0a, 0xde, 0x80, 0x45, 0x1c, 0xfe, 0x0b, 0x36, 0xa5, 0xc3, + 0x32, 0xa9, 0xad, 0xa5, 0x89, 0xb9, 0xf9, 0x42, 0xb8, 0x2b, 0x51, 0xf8, 0x18, 0x68, 0x72, 0x5a, + 0x1c, 0x36, 0xc9, 0x4a, 0xb0, 0x77, 0x45, 0x33, 0x83, 0x1c, 0xbb, 0x4a, 0xcc, 0x7f, 0x56, 0x37, + 0x01, 0x15, 0xcf, 0xb8, 0x24, 0x8b, 0x34, 0x21, 0x8b, 0xb8, 0xfc, 0x48, 0x35, 0x4b, 0x23, 0xd2, + 0x63, 0x89, 0xc2, 0x3e, 0x68, 0x90, 0x30, 0x2c, 0x68, 0xf2, 0x0b, 0x75, 0xfb, 0xaf, 0x34, 0x31, + 0x1b, 0x87, 0x15, 0x8c, 0x97, 0x63, 0xba, 0x97, 0x1b, 0xa0, 0x59, 0x34, 0xf2, 0x7a, 0xe2, 0x3b, + 0x14, 0xbe, 0x07, 0x9a, 0x58, 0x2a, 0x97, 0x70, 0x22, 0xbb, 0x69, 0xec, 0xdd, 0x5e, 0xfa, 0xb3, + 0x72, 0x37, 0x50, 0x38, 0xf6, 0x04, 0x10, 0x23, 0x11, 0x5d, 0x8d, 0xc5, 0x73, 0xca, 0x49, 0x35, + 0x93, 0x15, 0x86, 0x4b, 0x55, 0xf8, 0x10, 0x34, 0xf2, 0x2d, 0x38, 0x5e, 0x84, 0x34, 0x2f, 0xb3, + 0x9b, 0x53, 0x1a, 0x87, 0xd5, 0xd3, 0xd5, 0xf5, 0x2b, 0x5e, 0xa6, 0xc1, 0x37, 0x40, 0xa7, 0x79, + 0xe1, 0x62, 0x7b, 0xc4, 0x70, 0xdd, 0x58, 0x73, 0xb8, 0xec, 0x9d, 0x3c, 0x99, 0x5e, 0x20, 0x31, + 0xae, 0xc4, 0xe0, 0x00, 0xa8, 0xc2, 0xce, 0xb8, 0x55, 0x97, 0xaa, 0xbb, 0x6b, 0xaa, 0x8a, 0x8f, + 0xb0, 0x9b, 0xb9, 0xb2, 0x2a, 0x6e, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xcd, 0xe5, + 0x67, 0x7e, 0xcc, 0xe1, 0xbb, 0x15, 0xa7, 0xd1, 0x7a, 0x4e, 0x0b, 0xb6, 0xf4, 0xb9, 0x5c, 0x8b, + 0x02, 0x59, 0x72, 0xf9, 0x15, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd7, 0xec, 0x42, 0x96, + 0x57, 0xb5, 0xf1, 0x54, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, + 0x9d, 0x5f, 0x18, 0xb5, 0x8f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, + 0x28, 0xdf, 0x52, 0x43, 0xf9, 0x74, 0x69, 0xd4, 0xde, 0x6a, 0x85, 0xe6, 0xf7, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x65, 0x85, 0x5a, 0x9b, 0x75, 0x06, 0x00, 0x00, +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Topology) > 0 { + keysForTopology := make([]string, 0, len(m.Topology)) + for k := range m.Topology { + keysForTopology = append(keysForTopology, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- { + v := m.Topology[string(keysForTopology[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForTopology[iNdEx]) + copy(dAtA[i:], 
keysForTopology[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTopology[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Hostname != nil { + i -= len(*m.Hostname) + copy(dAtA[i:], *m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Hostname))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointConditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ready != nil { + i-- + if *m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AppProtocol != nil { + i -= len(*m.AppProtocol) + copy(dAtA[i:], *m.AppProtocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) + i-- + dAtA[i] = 0x22 + } + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.AddressType) + copy(dAtA[i:], m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType))) + i-- + dAtA[i] = 0x22 + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Topology) > 0 { + for k, v := range m.Topology { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *EndpointConditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ready != nil { + n += 2 + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + if m.AppProtocol != nil { + l = len(*m.AppProtocol) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for 
_, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + keysForTopology := make([]string, 0, len(this.Topology)) + for k := range this.Topology { + keysForTopology = append(keysForTopology, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + mapStringForTopology := "map[string]string{" + for _, k := range keysForTopology { + mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k]) + } + mapStringForTopology += "}" + s := strings.Join([]string{`&Endpoint{`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`, + `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `Topology:` + mapStringForTopology + `,`, + `}`, + }, "") + return s +} +func (this *EndpointConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointConditions{`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSlice) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EndpointSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&EndpointSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &v1.ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Topology == nil { + m.Topology = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Topology[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *EndpointConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Ready = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if 
err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddressType = AddressType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EndpointSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto new file mode 100644 index 00000000..62074e7a --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
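All of the hand-written Unmarshal functions and the skipGenerated helper above decode the same protocol-buffers wire format: a base-128 varint tag whose low three bits are the wire type and whose remaining bits are the field number, followed by a varint length for length-delimited fields. The following standalone sketch shows that inner decoding loop in isolation; it is illustrative only, is not part of the vendored file, and the decodeUvarint name is invented for the example.

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint reads one base-128 varint from data, mirroring the
// "for shift := uint(0); ; shift += 7" loops in the generated Unmarshal code.
func decodeUvarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: this was the last byte of the varint
			return value, n, nil
		}
	}
}

func main() {
	// 0x2a is the tag the generated Marshal code writes for the Topology
	// field: field number 5, wire type 2 (length-delimited).
	tag, n, err := decodeUvarint([]byte{0x2a})
	if err != nil {
		panic(err)
	}
	fmt.Printf("field=%d wireType=%d bytesRead=%d\n", tag>>3, tag&0x7, n)
	// Output: field=5 wireType=2 bytesRead=1
}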
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.discovery.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Endpoint represents a single logical "backend" implementing a service. +message Endpoint { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. + // +listType=set + repeated string addresses = 1; + + // conditions contains information about the current status of the endpoint. + optional EndpointConditions conditions = 2; + + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + optional string hostname = 3; + + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + map topology = 5; +} + +// EndpointConditions represents the current condition of an endpoint. +message EndpointConditions { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + optional bool ready = 1; +} + +// EndpointPort represents a Port used by an EndpointSlice +message EndpointPort { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. 
+ // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + optional string name = 1; + + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + optional string protocol = 2; + + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + optional int32 port = 3; + + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. + // Default is empty string. + optional string appProtocol = 4; +} + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +message EndpointSlice { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + optional string addressType = 4; + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + repeated Endpoint endpoints = 2; + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + repeated EndpointPort ports = 3; +} + +// EndpointSliceList represents a list of endpoint slices +message EndpointSliceList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoint slices + // +listType=set + repeated EndpointSlice items = 2; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go b/vendor/k8s.io/api/discovery/v1alpha1/register.go similarity index 52% rename from vendor/k8s.io/kubernetes/pkg/apis/core/register.go rename to vendor/k8s.io/api/discovery/v1alpha1/register.go index c6cd8681..55b73f99 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package core +package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,11 +22,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package -const GroupName = "" +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { @@ -39,60 +39,18 @@ func Resource(resource string) schema.GroupResource { } var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme ) +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { - if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { - return err - } scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationControllerList{}, - &ReplicationController{}, - &ServiceList{}, - &Service{}, - &ServiceProxyOptions{}, - &NodeList{}, - &Node{}, - &NodeProxyOptions{}, - &Endpoints{}, - &EndpointsList{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &Secret{}, - &SecretList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodPortForwardOptions{}, - &PodProxyOptions{}, - &ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, + &EndpointSlice{}, + &EndpointSliceList{}, ) - + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types.go b/vendor/k8s.io/api/discovery/v1alpha1/types.go new file mode 100644 index 00000000..fff30b5c --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/types.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSlice represents a subset of the endpoints that implement a service. 
+// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +type EndpointSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` +} + +// AddressType represents the type of address referred to by an endpoint. +type AddressType string + +const ( + // AddressTypeIP represents an IP Address. + // This address type has been deprecated and has been replaced by the IPv4 + // and IPv6 adddress types. New resources with this address type will be + // considered invalid. This will be fully removed in 1.18. + // +deprecated + AddressTypeIP = AddressType("IP") + // AddressTypeIPv4 represents an IPv4 Address. + AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. + AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. + AddressTypeFQDN = AddressType("FQDN") +) + +// Endpoint represents a single logical "backend" implementing a service. +type Endpoint struct { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. + // +listType=set + Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. + Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. 
+ // +optional + TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` +} + +// EndpointConditions represents the current condition of an endpoint. +type EndpointConditions struct { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` +} + +// EndpointPort represents a Port used by an EndpointSlice +type EndpointPort struct { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. + // Default is empty string. + AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSliceList represents a list of endpoint slices +type EndpointSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of endpoint slices + // +listType=set + Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..1ba2d60d --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,86 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Endpoint = map[string]string{ + "": "Endpoint represents a single logical \"backend\" implementing a service.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "conditions": "conditions contains information about the current status of the endpoint.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. 
This should match the corresponding node label.", +} + +func (Endpoint) SwaggerDoc() map[string]string { + return map_Endpoint +} + +var map_EndpointConditions = map[string]string{ + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", +} + +func (EndpointConditions) SwaggerDoc() map[string]string { + return map_EndpointConditions +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort represents a Port used by an EndpointSlice", + "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSlice = map[string]string{ + "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "metadata": "Standard object's metadata.", + "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.", + "endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", + "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". 
Each slice may include a maximum of 100 ports.", +} + +func (EndpointSlice) SwaggerDoc() map[string]string { + return map_EndpointSlice +} + +var map_EndpointSliceList = map[string]string{ + "": "EndpointSliceList represents a list of endpoint slices", + "metadata": "Standard list metadata.", + "items": "List of endpoint slices", +} + +func (EndpointSliceList) SwaggerDoc() map[string]string { + return map_EndpointSliceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go new file mode 100644 index 00000000..8f9c72f0 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" + // LabelManagedBy is used to indicate the controller or entity that manages + // an EndpointSlice. This label aims to enable different EndpointSlice + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // EndpointSlices. + LabelManagedBy = "endpointslice.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..c72f64ac --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,195 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Conditions.DeepCopyInto(&out.Conditions) + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions. +func (in *EndpointConditions) DeepCopy() *EndpointConditions { + if in == nil { + return nil + } + out := new(EndpointConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice. +func (in *EndpointSlice) DeepCopy() *EndpointSlice { + if in == nil { + return nil + } + out := new(EndpointSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EndpointSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList. +func (in *EndpointSliceList) DeepCopy() *EndpointSliceList { + if in == nil { + return nil + } + out := new(EndpointSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/discovery/v1beta1/doc.go b/vendor/k8s.io/api/discovery/v1beta1/doc.go new file mode 100644 index 00000000..9b54d1b9 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=discovery.k8s.io + +package v1beta1 // import "k8s.io/api/discovery/v1beta1" diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go new file mode 100644 index 00000000..2283d12d --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go @@ -0,0 +1,1730 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } +func (*EndpointConditions) ProtoMessage() {} +func (*EndpointConditions) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{1} +} +func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointConditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointConditions.Merge(m, src) +} +func (m *EndpointConditions) XXX_Size() int { + return m.Size() +} +func (m *EndpointConditions) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointConditions.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{2} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } +func (*EndpointSlice) ProtoMessage() {} +func (*EndpointSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{3} +} +func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSlice.Merge(m, src) +} +func (m *EndpointSlice) XXX_Size() int { + return m.Size() +} +func (m *EndpointSlice) 
XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo + +func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } +func (*EndpointSliceList) ProtoMessage() {} +func (*EndpointSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_ece80bbc872d519b, []int{4} +} +func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSliceList.Merge(m, src) +} +func (m *EndpointSliceList) XXX_Size() int { + return m.Size() +} +func (m *EndpointSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Endpoint)(nil), "k8s.io.api.discovery.v1beta1.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.discovery.v1beta1.Endpoint.TopologyEntry") + proto.RegisterType((*EndpointConditions)(nil), "k8s.io.api.discovery.v1beta1.EndpointConditions") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1beta1.EndpointPort") + proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1beta1.EndpointSlice") + proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1beta1.EndpointSliceList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1/generated.proto", fileDescriptor_ece80bbc872d519b) +} + +var fileDescriptor_ece80bbc872d519b = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcf, 0x6b, 0xdb, 0x48, + 0x14, 0xb6, 0xe2, 0x88, 0x95, 0xc6, 0x31, 0x9b, 0x0c, 0x7b, 0x30, 0xde, 0x20, 0x19, 0x2f, 0x2c, + 0x66, 0x43, 0xa4, 0x75, 0xc8, 0x2e, 0x61, 0xf7, 0x14, 0xed, 0x86, 0xb6, 0xd0, 0x36, 0x66, 0x1a, + 0x28, 0x94, 0x1e, 0x3a, 0x96, 0x26, 0xb2, 0x6a, 0x5b, 0x23, 0x34, 0x63, 0x83, 0x6f, 0xfd, 0x13, + 0xfa, 0xf7, 0xf4, 0x5a, 0x28, 0x39, 0xe6, 0x98, 0x93, 0xa8, 0xd5, 0xff, 0x22, 0xa7, 0x32, 0xa3, + 0x5f, 0x76, 0xdd, 0x1f, 0xbe, 0xcd, 0x7c, 0xf3, 0xbe, 0xef, 0xbd, 0xf7, 0xcd, 0x7b, 0xe0, 0x62, + 0x7c, 0xc6, 0xac, 0x80, 0xda, 0xe3, 0xd9, 0x90, 0xc4, 0x21, 0xe1, 0x84, 0xd9, 0x73, 0x12, 0x7a, + 0x34, 0xb6, 0xf3, 0x07, 0x1c, 0x05, 0xb6, 0x17, 0x30, 0x97, 0xce, 0x49, 0xbc, 0xb0, 0xe7, 0xfd, + 0x21, 0xe1, 0xb8, 0x6f, 0xfb, 0x24, 0x24, 0x31, 0xe6, 0xc4, 0xb3, 0xa2, 0x98, 0x72, 0x0a, 0x0f, + 0xb3, 0x68, 0x0b, 0x47, 0x81, 0x55, 0x46, 0x5b, 0x79, 0x74, 0xfb, 0xd8, 0x0f, 0xf8, 0x68, 0x36, + 0xb4, 0x5c, 0x3a, 0xb5, 0x7d, 0xea, 0x53, 0x5b, 0x92, 0x86, 0xb3, 0x6b, 0x79, 0x93, 0x17, 0x79, + 0xca, 0xc4, 0xda, 0xdd, 0x95, 0xd4, 0x2e, 0x8d, 0x89, 0x3d, 0xdf, 0x48, 0xd8, 0x3e, 0xad, 0x62, + 0xa6, 0xd8, 0x1d, 0x05, 0xa1, 0xa8, 0x2e, 0x1a, 0xfb, 0x02, 0x60, 0xf6, 0x94, 0x70, 0xfc, 0x35, + 0x96, 0xfd, 0x2d, 0x56, 0x3c, 0x0b, 0x79, 0x30, 0x25, 0x1b, 0x84, 0xbf, 0x7f, 0x44, 0x60, 0xee, + 0x88, 0x4c, 0xf1, 0x97, 0xbc, 0xee, 0xbb, 0x3a, 0xd0, 0x2e, 0x42, 0x2f, 0xa2, 0x41, 0xc8, 0xe1, + 0x11, 0xd0, 0xb1, 0xe7, 0xc5, 0x84, 0x31, 0xc2, 0x5a, 0x4a, 0xa7, 0xde, 0xd3, 0x9d, 0x66, 0x9a, + 0x98, 0xfa, 0x79, 0x01, 0xa2, 0xea, 0x1d, 0x7a, 0x00, 0xb8, 0x34, 0xf4, 0x02, 0x1e, 0xd0, 0x90, + 0xb5, 0x76, 0x3a, 0x4a, 0xaf, 0x71, 0xf2, 
0xa7, 0xf5, 0x3d, 0x7b, 0xad, 0x22, 0xd1, 0x7f, 0x25, + 0xcf, 0x81, 0x37, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5a, 0xd1, 0x85, 0x3d, 0xa0, 0x8d, + 0x28, 0xe3, 0x21, 0x9e, 0x92, 0x56, 0xbd, 0xa3, 0xf4, 0x74, 0x67, 0x2f, 0x4d, 0x4c, 0xed, 0x61, + 0x8e, 0xa1, 0xf2, 0x15, 0x0e, 0x80, 0xce, 0x71, 0xec, 0x13, 0x8e, 0xc8, 0x75, 0x6b, 0x57, 0x96, + 0xf3, 0xdb, 0x6a, 0x39, 0xe2, 0x83, 0xac, 0x79, 0xdf, 0xba, 0x1c, 0xbe, 0x26, 0xae, 0x08, 0x22, + 0x31, 0x09, 0x5d, 0x92, 0x75, 0x78, 0x55, 0x30, 0x51, 0x25, 0x02, 0x87, 0x40, 0xe3, 0x34, 0xa2, + 0x13, 0xea, 0x2f, 0x5a, 0x6a, 0xa7, 0xde, 0x6b, 0x9c, 0x9c, 0x6e, 0xd7, 0x9f, 0x75, 0x95, 0xd3, + 0x2e, 0x42, 0x1e, 0x2f, 0x9c, 0xfd, 0xbc, 0x47, 0xad, 0x80, 0x51, 0xa9, 0xdb, 0xfe, 0x17, 0x34, + 0xd7, 0x82, 0xe1, 0x3e, 0xa8, 0x8f, 0xc9, 0xa2, 0xa5, 0x88, 0x5e, 0x91, 0x38, 0xc2, 0x5f, 0x80, + 0x3a, 0xc7, 0x93, 0x19, 0x91, 0x1e, 0xeb, 0x28, 0xbb, 0xfc, 0xb3, 0x73, 0xa6, 0x74, 0xff, 0x02, + 0x70, 0xd3, 0x52, 0x68, 0x02, 0x35, 0x26, 0xd8, 0xcb, 0x34, 0x34, 0x47, 0x4f, 0x13, 0x53, 0x45, + 0x02, 0x40, 0x19, 0xde, 0xfd, 0xa0, 0x80, 0xbd, 0x82, 0x37, 0xa0, 0x31, 0x87, 0x87, 0x60, 0x57, + 0x1a, 0x2c, 0x93, 0x3a, 0x5a, 0x9a, 0x98, 0xbb, 0x4f, 0x85, 0xb9, 0x12, 0x85, 0x0f, 0x80, 0x26, + 0x67, 0xc5, 0xa5, 0x93, 0xac, 0x04, 0xe7, 0x48, 0x34, 0x33, 0xc8, 0xb1, 0xfb, 0xc4, 0xfc, 0x75, + 0x73, 0x0f, 0xac, 0xe2, 0x19, 0x95, 0x64, 0x91, 0x26, 0xa2, 0x31, 0x97, 0xff, 0xa8, 0x66, 0x69, + 0x44, 0x7a, 0x24, 0x51, 0xd8, 0x07, 0x0d, 0x1c, 0x45, 0x05, 0x4d, 0xfe, 0xa0, 0xee, 0xfc, 0x9c, + 0x26, 0x66, 0xe3, 0xbc, 0x82, 0xd1, 0x6a, 0x4c, 0x77, 0xb9, 0x03, 0x9a, 0x45, 0x23, 0xcf, 0x26, + 0x81, 0x4b, 0xe0, 0x2b, 0xa0, 0x89, 0x95, 0xf2, 0x30, 0xc7, 0xb2, 0x9b, 0xf5, 0x91, 0x2c, 0x37, + 0xc3, 0x8a, 0xc6, 0xbe, 0x00, 0x98, 0x25, 0xa2, 0xab, 0xa9, 0x78, 0x42, 0x38, 0xae, 0x46, 0xb2, + 0xc2, 0x50, 0xa9, 0x0a, 0xff, 0x07, 0x8d, 0x7c, 0x07, 0xae, 0x16, 0x11, 0xc9, 0xcb, 0xec, 0xe6, + 0x94, 0xc6, 0x79, 0xf5, 0x74, 0xbf, 0x7e, 0x45, 0xab, 0x34, 0xf8, 0x1c, 0xe8, 0x24, 0x2f, 0x5c, + 0xec, 0x8e, 0x98, 0xad, 0xdf, 0xb7, 0x9b, 0x2d, 0xe7, 0x20, 0xcf, 0xa5, 0x17, 0x08, 0x43, 0x95, + 0x16, 0xbc, 0x04, 0xaa, 0x70, 0x93, 0xb5, 0xea, 0x52, 0xf4, 0x8f, 0xed, 0x44, 0xc5, 0x37, 0x38, + 0xcd, 0x5c, 0x58, 0x15, 0x37, 0x86, 0x32, 0x9d, 0xee, 0x7b, 0x05, 0x1c, 0xac, 0x79, 0xfc, 0x38, + 0x60, 0x1c, 0xbe, 0xdc, 0xf0, 0xd9, 0xda, 0xce, 0x67, 0xc1, 0x96, 0x2e, 0x97, 0x4b, 0x51, 0x20, + 0x2b, 0x1e, 0x0f, 0x80, 0x1a, 0x70, 0x32, 0x2d, 0x9c, 0x39, 0xda, 0xae, 0x09, 0x59, 0x5d, 0xd5, + 0xc5, 0x23, 0xa1, 0x80, 0x32, 0x21, 0xe7, 0xf8, 0x66, 0x69, 0xd4, 0x6e, 0x97, 0x46, 0xed, 0x6e, + 0x69, 0xd4, 0xde, 0xa4, 0x86, 0x72, 0x93, 0x1a, 0xca, 0x6d, 0x6a, 0x28, 0x77, 0xa9, 0xa1, 0x7c, + 0x4c, 0x0d, 0xe5, 0xed, 0x27, 0xa3, 0xf6, 0xe2, 0xa7, 0x5c, 0xf2, 0x73, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x29, 0x1a, 0xa2, 0x6f, 0x6d, 0x06, 0x00, 0x00, +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Topology) > 0 { + keysForTopology := make([]string, 0, len(m.Topology)) + for k := range m.Topology { + keysForTopology = append(keysForTopology, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + 
for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- { + v := m.Topology[string(keysForTopology[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForTopology[iNdEx]) + copy(dAtA[i:], keysForTopology[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTopology[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Hostname != nil { + i -= len(*m.Hostname) + copy(dAtA[i:], *m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Hostname))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointConditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ready != nil { + i-- + if *m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AppProtocol != nil { + i -= len(*m.AppProtocol) + copy(dAtA[i:], *m.AppProtocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) + i-- + dAtA[i] = 0x22 + } + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+ _ = i + var l int + _ = l + i -= len(m.AddressType) + copy(dAtA[i:], m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType))) + i-- + dAtA[i] = 0x22 + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Topology) > 0 { + for k, v := range m.Topology { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *EndpointConditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ready != nil { + n += 2 + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + if m.AppProtocol != nil { + l = len(*m.AppProtocol) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointSlice) Size() (n int) 
{ + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + keysForTopology := make([]string, 0, len(this.Topology)) + for k := range this.Topology { + keysForTopology = append(keysForTopology, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + mapStringForTopology := "map[string]string{" + for _, k := range keysForTopology { + mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k]) + } + mapStringForTopology += "}" + s := strings.Join([]string{`&Endpoint{`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`, + `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `Topology:` + mapStringForTopology + `,`, + `}`, + }, "") + return s +} +func (this *EndpointConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointConditions{`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSlice) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSliceList) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForItems := "[]EndpointSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &v1.ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Topology == nil { + m.Topology = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Topology[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Ready = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { 
+ return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddressType = AddressType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EndpointSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto new file mode 100644 index 00000000..cce6f970 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.discovery.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Endpoint represents a single logical "backend" implementing a service. +message Endpoint { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. + // +listType=set + repeated string addresses = 1; + + // conditions contains information about the current status of the endpoint. + optional EndpointConditions conditions = 2; + + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + optional string hostname = 3; + + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + map topology = 5; +} + +// EndpointConditions represents the current condition of an endpoint. +message EndpointConditions { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + optional bool ready = 1; +} + +// EndpointPort represents a Port used by an EndpointSlice +message EndpointPort { + // The name of this port. 
All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + optional string name = 1; + + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + optional string protocol = 2; + + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + optional int32 port = 3; + + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. + // Default is empty string. + optional string appProtocol = 4; +} + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +message EndpointSlice { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + optional string addressType = 4; + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + repeated Endpoint endpoints = 2; + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + repeated EndpointPort ports = 3; +} + +// EndpointSliceList represents a list of endpoint slices +message EndpointSliceList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoint slices + // +listType=set + repeated EndpointSlice items = 2; +} + diff --git a/vendor/k8s.io/api/discovery/v1beta1/register.go b/vendor/k8s.io/api/discovery/v1beta1/register.go new file mode 100644 index 00000000..fc993c5f --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1beta1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EndpointSlice{}, + &EndpointSliceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go new file mode 100644 index 00000000..e3dc5653 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +type EndpointSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. This field is + // immutable after creation. The following address types are currently + // supported: + // * IPv4: Represents an IPv4 Address. + // * IPv6: Represents an IPv6 Address. + // * FQDN: Represents a Fully Qualified Domain Name. + AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. 
+ // +listType=atomic + Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` +} + +// AddressType represents the type of address referred to by an endpoint. +type AddressType string + +const ( + // AddressTypeIP represents an IP Address. + // This address type has been deprecated and has been replaced by the IPv4 + // and IPv6 adddress types. New resources with this address type will be + // considered invalid. This will be fully removed in 1.18. + // +deprecated + AddressTypeIP = AddressType("IP") + // AddressTypeIPv4 represents an IPv4 Address. + AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. + AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. + AddressTypeFQDN = AddressType("FQDN") +) + +// Endpoint represents a single logical "backend" implementing a service. +type Endpoint struct { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. Consumers + // must handle different types of addresses in the context of their own + // capabilities. This must contain at least one address but no more than + // 100. + // +listType=set + Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. + Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. 
+ // +optional + Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` +} + +// EndpointConditions represents the current condition of an endpoint. +type EndpointConditions struct { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` +} + +// EndpointPort represents a Port used by an EndpointSlice +type EndpointPort struct { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass DNS_LABEL validation: + // * must be no more than 63 characters long. + // * must consist of lower case alphanumeric characters or '-'. + // * must start and end with an alphanumeric character. + // Default is empty string. + Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names. + // Default is empty string. + AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSliceList represents a list of endpoint slices +type EndpointSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of endpoint slices + // +listType=set + Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..9dd3a035 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,86 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Endpoint = map[string]string{ + "": "Endpoint represents a single logical \"backend\" implementing a service.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "conditions": "conditions contains information about the current status of the endpoint.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", +} + +func (Endpoint) SwaggerDoc() map[string]string { + return map_Endpoint +} + +var map_EndpointConditions = map[string]string{ + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", +} + +func (EndpointConditions) SwaggerDoc() map[string]string { + return map_EndpointConditions +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort represents a Port used by an EndpointSlice", + "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "The port number of the endpoint. 
If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSlice = map[string]string{ + "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "metadata": "Standard object's metadata.", + "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.", + "endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", + "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.", +} + +func (EndpointSlice) SwaggerDoc() map[string]string { + return map_EndpointSlice +} + +var map_EndpointSliceList = map[string]string{ + "": "EndpointSliceList represents a list of endpoint slices", + "metadata": "Standard list metadata.", + "items": "List of endpoint slices", +} + +func (EndpointSliceList) SwaggerDoc() map[string]string { + return map_EndpointSliceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go b/vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go new file mode 100644 index 00000000..b0caa3c6 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" + // LabelManagedBy is used to indicate the controller or entity that manages + // an EndpointSlice. This label aims to enable different EndpointSlice + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // EndpointSlices. 
+ LabelManagedBy = "endpointslice.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..8490ec73 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,195 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Conditions.DeepCopyInto(&out.Conditions) + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions. +func (in *EndpointConditions) DeepCopy() *EndpointConditions { + if in == nil { + return nil + } + out := new(EndpointConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. 
+func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice. +func (in *EndpointSlice) DeepCopy() *EndpointSlice { + if in == nil { + return nil + } + out := new(EndpointSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList. +func (in *EndpointSliceList) DeepCopy() *EndpointSliceList { + if in == nil { + return nil + } + out := new(EndpointSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go index 8b1a3e31..9bec7b3c 100644 --- a/vendor/k8s.io/api/events/v1beta1/doc.go +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=events.k8s.io + package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index e24a82ab..0e9a8e78 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -14,33 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto - - It has these top-level messages: - Event - EventList - EventSeries -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -53,27 +44,159 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_Event proto.InternalMessageInfo -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{2} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) 
XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_EventSeries proto.InternalMessageInfo func init() { proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_4f97f691c32a5ac8) +} + +var fileDescriptor_4f97f691c32a5ac8 = []byte{ + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, + 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, + 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, + 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, + 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, + 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, + 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, + 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, + 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, + 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, + 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, + 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, + 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, + 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, + 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, + 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, + 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, + 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, + 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, + 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, + 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, + 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, + 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, + 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, + 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, + 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, + 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, + 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 
0xa3, + 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, + 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, + 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, + 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, + 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, + 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, + 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, + 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, + 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, + 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, + 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, + 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, + 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, + 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, + 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, + 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, + 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, + 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, + 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, + 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, + 0x00, +} + func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -81,112 +204,139 @@ func (m *Event) Marshal() (dAtA []byte, err error) { } func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n2, err := m.EventTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + i-- + dAtA[i] = 0x78 + { + size, err := m.DeprecatedLastTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - if m.Series != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n3, err := m.Series.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x72 + { + size, err := m.DeprecatedFirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n3 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) - n4, err := m.Regarding.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x6a + { + size, err := m.DeprecatedSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x62 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x5a + i -= len(m.Note) + copy(dAtA[i:], m.Note) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i-- + dAtA[i] = 0x52 if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n5, err := m.Related.MarshalTo(dAtA[i:]) + } + { + size, err := m.Regarding.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n5 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) - i += copy(dAtA[i:], m.Note) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) - n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x42 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x3a + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x32 + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x2a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x22 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n6 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) - n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) - n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - dAtA[i] = 0x78 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -194,37 +344,46 @@ func (m *EventList) Marshal() (dAtA []byte, err error) { } func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -232,56 +391,51 @@ func (m *EventSeries) Marshal() (dAtA []byte, err error) { } func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x1a - i++ + i -= len(m.State) + copy(dAtA[i:], m.State) i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func 
encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Event) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -321,6 +475,9 @@ func (m *Event) Size() (n int) { } func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -335,6 +492,9 @@ func (m *EventList) Size() (n int) { } func (m *EventSeries) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Count)) @@ -346,14 +506,7 @@ func (m *EventSeries) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -363,20 +516,20 @@ func (this *Event) String() string { return "nil" } s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `Action:` + fmt.Sprintf("%v", this.Action) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, + `Regarding:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Regarding), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "v11.ObjectReference", 1) + `,`, `Note:` + fmt.Sprintf("%v", this.Note) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, - `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedSource), "EventSource", "v11.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.DeprecatedFirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedLastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, `}`, }, "") @@ -386,9 +539,14 @@ func (this *EventList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -399,7 +557,7 @@ func (this *EventSeries) String() string { } s := strings.Join([]string{`&EventSeries{`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, `}`, }, "") @@ -428,7 +586,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -456,7 +614,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -465,6 +623,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -486,7 +647,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -495,6 +656,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -516,7 +680,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -525,6 +689,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -549,7 +716,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -559,6 +726,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -578,7 +748,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -588,6 +758,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -607,7 +780,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -617,6 +790,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -636,7 +812,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -646,6 +822,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -665,7 +844,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -674,6 +853,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -695,7 +877,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -704,11 +886,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Related == nil { - m.Related = &k8s_io_api_core_v1.ObjectReference{} + m.Related = &v11.ObjectReference{} } if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -728,7 +913,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -738,6 +923,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -757,7 +945,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -767,6 +955,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -786,7 +977,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -795,6 +986,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -816,7 +1010,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -825,6 +1019,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -846,7 +1043,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -855,6 +1052,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -876,7 +1076,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DeprecatedCount |= (int32(b) & 0x7F) << shift + m.DeprecatedCount |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -890,6 +1090,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -917,7 +1120,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -945,7 +1148,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -954,6 +1157,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -975,7 +1181,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -984,6 +1190,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1001,6 +1210,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1028,7 +1240,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1056,7 +1268,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1075,7 +1287,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1084,6 +1296,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1105,7 +1320,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1115,6 +1330,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1129,6 +1347,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1195,10 +1416,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1227,6 +1451,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1245,62 +1472,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 801 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, - 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, - 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, - 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, - 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, - 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, - 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, - 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, - 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, - 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, - 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, - 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, - 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, - 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, - 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, - 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, - 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, - 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, - 0x2d, 0x38, 0xb4, 0x0e, 
0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, - 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, - 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, - 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, - 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, - 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, - 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, - 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, - 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, - 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, - 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, - 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, - 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, - 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, - 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, - 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, - 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, - 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, - 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, - 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, - 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, - 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, - 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, - 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, - 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, - 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, - 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, - 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, - 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, - 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, - 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index b3e565e6..58f5aa42 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -98,7 +98,7 @@ message Event { // EventList is a list of Event objects. message EventList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -116,6 +116,7 @@ message EventSeries { optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; // Information whether this series is ongoing or finished. + // Deprecated. Planned removal for 1.18 optional string state = 3; } diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index dc48ddb0..0571fbb2 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -43,11 +43,11 @@ type Event struct { // ID of the controller instance, e.g. `kubelet-xyzf`. // +optional - ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` + ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"` // What action was taken/failed regarding to the regarding object. // +optional - Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` + Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"` // Why the action was taken. Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` @@ -96,6 +96,7 @@ type EventSeries struct { // Time when last Event from the series was seen before last heartbeat. LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` // Information whether this series is ongoing or finished. + // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` } @@ -113,7 +114,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index a15672c1..639daca6 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of Event objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -63,7 +63,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", - "state": "Information whether this series is ongoing or finished.", + "state": "Information whether this series is ongoing or finished. Deprecated. 
Planned removal for 1.18", } func (EventSeries) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go index e52e142c..779ebaf6 100644 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -70,7 +70,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go index 8ce18304..fa799f30 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/extensions/v1beta1" diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 72d64db3..65b47eab 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -14,94 +14,29 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto - - It has these top-level messages: - AllowedFlexVolume - AllowedHostPath - CustomMetricCurrentStatus - CustomMetricCurrentStatusList - CustomMetricTarget - CustomMetricTargetList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - FSGroupStrategyOptions - HTTPIngressPath - HTTPIngressRuleValue - HostPortRange - IDRange - IPBlock - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - ReplicationControllerDummy - RollbackConfig - RollingUpdateDaemonSet - RollingUpdateDeployment - RunAsUserStrategyOptions - SELinuxStrategyOptions - Scale - ScaleSpec - ScaleStatus - SupplementalGroupsStrategyOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys 
"github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -114,2818 +49,4808 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo -func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } -func (*CustomMetricCurrentStatus) ProtoMessage() {} -func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{1} } - -func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } -func (*CustomMetricCurrentStatusList) ProtoMessage() {} -func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) } -func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } -func (*CustomMetricTarget) ProtoMessage() {} -func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo -func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } -func 
(*CustomMetricTargetList) ProtoMessage() {} -func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{3} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{4} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{5} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) } -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{6} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{7} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{8} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{9} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +var xxx_messageInfo_Deployment proto.InternalMessageInfo -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, 
[]int{10} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{11} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{12} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (m *DeploymentSpec) Reset() { *m = 
DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{13} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{14} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{15} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *NetworkPolicyEgressRule) 
Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{16} } - -func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } -func (*NetworkPolicyIngressRule) ProtoMessage() {} -func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{35} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } - -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{17} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{18} +} +func (m 
*HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{19} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{20} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +var xxx_messageInfo_IDRange proto.InternalMessageInfo -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (m *IPBlock) Reset() { *m = IPBlock{} } 
+func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{21} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +var xxx_messageInfo_IPBlock proto.InternalMessageInfo -func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } -func (*ReplicationControllerDummy) ProtoMessage() {} -func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{22} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) } -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } - -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +var xxx_messageInfo_Ingress proto.InternalMessageInfo -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (*RollingUpdateDeployment) ProtoMessage() {} -func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{23} } - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{52} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + 
xxx_messageInfo_IngressBackend.DiscardUnknown(m) } -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{24} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +var xxx_messageInfo_IngressList proto.InternalMessageInfo -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{57} +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{25} } - -func init() { - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") - proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") - proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") - proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") - proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") - proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") - proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") - proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") - proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") - proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") - proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetUpdateStrategy") - proto.RegisterType((*Deployment)(nil), "k8s.io.api.extensions.v1beta1.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), 
"k8s.io.api.extensions.v1beta1.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") - proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") - proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") - proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") - proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") - proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") - proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") - proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList") - proto.RegisterType((*IngressRule)(nil), "k8s.io.api.extensions.v1beta1.IngressRule") - proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue") - proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec") - proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressStatus") - proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.extensions.v1beta1.IngressTLS") - proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicy") - proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyEgressRule") - proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyIngressRule") - proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyList") - proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") - proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") - proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") - proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") - proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") - proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec") - proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus") - proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.api.extensions.v1beta1.ReplicationControllerDummy") - proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") - 
proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") - proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) } -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_IngressRule proto.InternalMessageInfo + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{26} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) } -func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{27} +} +func (m *IngressSpec) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) } -func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n1, err := m.CurrentValue.MarshalTo(dAtA[i:]) +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{28} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n1 - return i, nil + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) } -func (m *CustomMetricCurrentStatusList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{29} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CustomMetricCurrentStatusList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) } -func (m *CustomMetricTarget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{30} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error 
{ + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) } -func (m *CustomMetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{31} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n2 - return i, nil + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) } -func (m *CustomMetricTargetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{32} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CustomMetricTargetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) } -func (m *DaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo + +func (m 
*NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{33} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) } -func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{34} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{35} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{36} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n5 - return i, nil + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) } -func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{37} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) } -func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{38} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n6 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) } -func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { 
+ return fileDescriptor_cdc93917efc28165, []int{39} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n7, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) } -func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{40} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) } -func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n9, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{41} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) - if 
m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) } -func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{42} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) } -func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) 
XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) } -func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) } -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } +func (*ReplicationControllerDummy) ProtoMessage() {} +func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{45} +} +func (m *ReplicationControllerDummy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicationControllerDummy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerDummy.Merge(m, src) +} +func (m *ReplicationControllerDummy) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerDummy) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerDummy.DiscardUnknown(m) } -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ReplicationControllerDummy proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{46} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m 
*RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() +} +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{47} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n13 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{48} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n14 - return i, nil + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } -func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + +func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } +func (*RunAsGroupStrategyOptions) ProtoMessage() {} +func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{49} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) } -func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - 
var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{50} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n15 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{51} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n16 - return i, nil + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) } -func (m *DeploymentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{52} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return 
b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) } -func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n17, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{53} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n17 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) } -func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{54} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) } -func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.UpdatedAnnotations) > 0 { - keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) - for k := range m.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { - dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - dAtA[i] = 0x1a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{55} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n18 - return i, nil + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) } -func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{56} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) } -func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64(*m.ProgressDeadlineSeconds)) - } - return i, nil +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") + proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.extensions.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") + proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") + proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.api.extensions.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.extensions.v1beta1.IngressTLS") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyEgressRule") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), 
"k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus") + proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.api.extensions.v1beta1.ReplicationControllerDummy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } -func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_cdc93917efc28165) +} + +var fileDescriptor_cdc93917efc28165 = []byte{ + // 3684 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4f, 0x6c, 0x1b, 0x47, + 0x77, 0xf7, 0x92, 0x94, 0x48, 0x3d, 0xfd, 0x1f, 0xc9, 0x12, 0x3f, 0x3b, 0x16, 0xfd, 0x6d, 0x00, + 0xd7, 0x49, 0x6d, 0x32, 0x76, 0x6c, 0x7f, 0xae, 0x8d, 0x7e, 0x89, 0x28, 0x59, 0xb6, 0x52, 0xfd, + 0x61, 0x86, 0x92, 0x1b, 0x04, 0x4d, 0x9a, 0x15, 0x39, 0xa2, 0xd6, 0x5a, 0xee, 0x6e, 0x76, 0x87, + 0x8a, 0x08, 0xf4, 0xd0, 0x43, 0x51, 0xa0, 0x40, 0x8b, 0xf6, 0x92, 0xb6, 0xc7, 0x06, 0x05, 0x7a, + 0x6a, 0xd1, 0xde, 0xda, 0x43, 0x10, 0xa0, 0x40, 0x0a, 0x18, 0x45, 0x5a, 0xe4, 0xd6, 0x9c, 0x84, + 0x46, 0x39, 0x15, 0x3d, 0xf5, 0x56, 0xf8, 0x50, 0x14, 0x33, 0x3b, 0xfb, 0x7f, 0x57, 0x5c, 0x29, + 0xb6, 0xd0, 0x00, 0xbd, 0x89, 0xf3, 0xde, 0xfb, 0xbd, 0x37, 0x33, 0x6f, 0xde, 0x7b, 0x33, 0xfb, + 0x04, 0x2b, 0xfb, 0xf7, 0xed, 0xaa, 0x6a, 0xd4, 0xf6, 
0x7b, 0x3b, 0xc4, 0xd2, 0x09, 0x25, 0x76, + 0xed, 0x80, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x48, 0x89, 0x6e, 0xab, + 0x86, 0x6e, 0xd7, 0x0e, 0x6e, 0xed, 0x10, 0xaa, 0xdc, 0xaa, 0x75, 0x88, 0x4e, 0x2c, 0x85, 0x92, + 0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x2b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, 0xcf, 0x5e, 0x15, + 0xec, 0x97, 0x6e, 0x76, 0x54, 0xba, 0xd7, 0xdb, 0xa9, 0xb6, 0x8c, 0x6e, 0xad, 0x63, 0x74, 0x8c, + 0x1a, 0x97, 0xda, 0xe9, 0xed, 0xf2, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0xbb, 0x24, 0x07, 0x94, + 0xb7, 0x0c, 0x8b, 0xd4, 0x0e, 0x62, 0x1a, 0x2f, 0xdd, 0xf1, 0x79, 0xba, 0x4a, 0x6b, 0x4f, 0xd5, + 0x89, 0xd5, 0xaf, 0x99, 0xfb, 0x1d, 0x36, 0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, 0x52, 0xb5, 0x34, + 0x29, 0xab, 0xa7, 0x53, 0xb5, 0x4b, 0x62, 0x02, 0xf7, 0x06, 0x09, 0xd8, 0xad, 0x3d, 0xd2, 0x55, + 0x62, 0x72, 0x6f, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, 0x8a, 0x0a, 0xc9, + 0x77, 0x60, 0x6a, 0x51, 0xd3, 0x8c, 0xcf, 0x48, 0x7b, 0xa9, 0xb9, 0xba, 0x6c, 0xa9, 0x07, 0xc4, + 0x42, 0x57, 0xa1, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0xae, 0x4a, 0xd7, 0x47, 0xea, 0x63, 0xcf, 0x8f, + 0x2a, 0x17, 0x8e, 0x8f, 0x2a, 0x85, 0x0d, 0xa5, 0x4b, 0x30, 0xa7, 0xc8, 0x0f, 0x61, 0x5a, 0x48, + 0xad, 0x68, 0xe4, 0xf0, 0xa9, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x83, 0xe1, 0x36, 0x07, 0x10, 0x82, + 0x13, 0x42, 0x70, 0xd8, 0x81, 0xc5, 0x82, 0x2a, 0xdb, 0x30, 0x29, 0x84, 0x9f, 0x18, 0x36, 0x6d, + 0x28, 0x74, 0x0f, 0xdd, 0x06, 0x30, 0x15, 0xba, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4, 0x91, + 0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba, 0xd6, + 0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c, 0xf2, + 0xe7, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0x84, 0xa2, 0x4f, 0xa0, 0xc4, 0xb6, 0xab, + 0xad, 0x50, 0x85, 0x6b, 0x1b, 0xbd, 0xfd, 0x56, 0xd5, 0x77, 0x27, 0x6f, 0xf5, 0xaa, 0xe6, 0x7e, + 0x87, 0x0d, 0xd8, 0x55, 0xc6, 0x5d, 0x3d, 0xb8, 0x55, 0xdd, 0xdc, 0x79, 0x46, 0x5a, 0x74, 0x9d, + 0x50, 0xc5, 0xb7, 0xcf, 0x1f, 0xc3, 0x1e, 0x2a, 0xda, 0x80, 0x82, 0x6d, 0x92, 0x16, 0xb7, 0x6c, + 0xf4, 0xf6, 0x8d, 0xea, 0x89, 0xce, 0x5a, 0xf5, 0x2c, 0x6b, 0x9a, 0xa4, 0xe5, 0xaf, 0x38, 0xfb, + 0x85, 0x39, 0x0e, 0x7a, 0x0a, 0xc3, 0x36, 0x55, 0x68, 0xcf, 0x2e, 0xe7, 0x39, 0x62, 0x35, 0x33, + 0x22, 0x97, 0xf2, 0x37, 0xc3, 0xf9, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x39, 0x40, 0x1e, 0xef, 0x92, + 0xa1, 0xb7, 0x55, 0xaa, 0x1a, 0x3a, 0x7a, 0x00, 0x05, 0xda, 0x37, 0x5d, 0x17, 0xb8, 0xe6, 0x1a, + 0xb4, 0xd5, 0x37, 0xc9, 0x8b, 0xa3, 0xca, 0x5c, 0x5c, 0x82, 0x51, 0x30, 0x97, 0x41, 0x6b, 0x9e, + 0xa9, 0x39, 0x2e, 0x7d, 0x27, 0xac, 0xfa, 0xc5, 0x51, 0x25, 0xe1, 0xb0, 0x55, 0x3d, 0xa4, 0xb0, + 0x81, 0xe8, 0x00, 0x90, 0xa6, 0xd8, 0x74, 0xcb, 0x52, 0x74, 0xdb, 0xd1, 0xa4, 0x76, 0x89, 0x58, + 0x84, 0x37, 0xb3, 0x6d, 0x1a, 0x93, 0xa8, 0x5f, 0x12, 0x56, 0xa0, 0xb5, 0x18, 0x1a, 0x4e, 0xd0, + 0xc0, 0xbc, 0xd9, 0x22, 0x8a, 0x6d, 0xe8, 0xe5, 0x42, 0xd8, 0x9b, 0x31, 0x1f, 0xc5, 0x82, 0x8a, + 0xde, 0x80, 0x62, 0x97, 0xd8, 0xb6, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b, 0xeb, + 0xce, 0x30, 0x76, 0xe9, 0xf2, 0x97, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0xda, 0x14, 0xfd, 0x56, + 0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c, 0x70, + 0x1d, 0x86, 0x54, 0x4a, 0xba, 0x6c, 0x1f, 0xf2, 0xd7, 0x47, 0x6f, 0x5f, 0xcf, 0xea, 0x32, 0xf5, + 0x71, 0x01, 0x3a, 0xb4, 0xca, 0xc4, 0xb1, 0x83, 0x22, 0xff, 0x69, 0x21, 0x60, 0x3e, 0x73, 0x4d, + 0xf4, 0x11, 0x94, 0x6c, 0xa2, 0x91, 0x16, 0x35, 0x2c, 0x61, 0xfe, 0xdb, 0x19, 
0xcd, 0x57, 0x76, + 0x88, 0xd6, 0x14, 0xa2, 0xf5, 0x31, 0x66, 0xbf, 0xfb, 0x0b, 0x7b, 0x90, 0xe8, 0x7d, 0x28, 0x51, + 0xd2, 0x35, 0x35, 0x85, 0x12, 0x71, 0x8e, 0x5e, 0x0f, 0x4e, 0x81, 0x79, 0x0e, 0x03, 0x6b, 0x18, + 0xed, 0x2d, 0xc1, 0xc6, 0x8f, 0x8f, 0xb7, 0x24, 0xee, 0x28, 0xf6, 0x60, 0xd0, 0x01, 0x4c, 0xf4, + 0xcc, 0x36, 0xe3, 0xa4, 0x2c, 0x0a, 0x76, 0xfa, 0xc2, 0x93, 0xee, 0x65, 0x5d, 0x9b, 0xed, 0x90, + 0x74, 0x7d, 0x4e, 0xe8, 0x9a, 0x08, 0x8f, 0xe3, 0x88, 0x16, 0xb4, 0x08, 0x93, 0x5d, 0x55, 0x67, + 0x71, 0xa9, 0xdf, 0x24, 0x2d, 0x43, 0x6f, 0xdb, 0xdc, 0xad, 0x86, 0xea, 0xf3, 0x02, 0x60, 0x72, + 0x3d, 0x4c, 0xc6, 0x51, 0x7e, 0xf4, 0x1e, 0x20, 0x77, 0x1a, 0x8f, 0x9d, 0x20, 0xae, 0x1a, 0x3a, + 0xf7, 0xb9, 0xbc, 0xef, 0xdc, 0x5b, 0x31, 0x0e, 0x9c, 0x20, 0x85, 0xd6, 0x60, 0xd6, 0x22, 0x07, + 0x2a, 0x9b, 0xe3, 0x13, 0xd5, 0xa6, 0x86, 0xd5, 0x5f, 0x53, 0xbb, 0x2a, 0x2d, 0x0f, 0x73, 0x9b, + 0xca, 0xc7, 0x47, 0x95, 0x59, 0x9c, 0x40, 0xc7, 0x89, 0x52, 0xf2, 0x9f, 0x0d, 0xc3, 0x64, 0x24, + 0xde, 0xa0, 0xa7, 0x30, 0xd7, 0xea, 0x59, 0x16, 0xd1, 0xe9, 0x46, 0xaf, 0xbb, 0x43, 0xac, 0x66, + 0x6b, 0x8f, 0xb4, 0x7b, 0x1a, 0x69, 0x73, 0x47, 0x19, 0xaa, 0x2f, 0x08, 0x8b, 0xe7, 0x96, 0x12, + 0xb9, 0x70, 0x8a, 0x34, 0x5b, 0x05, 0x9d, 0x0f, 0xad, 0xab, 0xb6, 0xed, 0x61, 0xe6, 0x38, 0xa6, + 0xb7, 0x0a, 0x1b, 0x31, 0x0e, 0x9c, 0x20, 0xc5, 0x6c, 0x6c, 0x13, 0x5b, 0xb5, 0x48, 0x3b, 0x6a, + 0x63, 0x3e, 0x6c, 0xe3, 0x72, 0x22, 0x17, 0x4e, 0x91, 0x46, 0x77, 0x61, 0xd4, 0xd1, 0xc6, 0xf7, + 0x4f, 0x6c, 0xf4, 0x8c, 0x00, 0x1b, 0xdd, 0xf0, 0x49, 0x38, 0xc8, 0xc7, 0xa6, 0x66, 0xec, 0xd8, + 0xc4, 0x3a, 0x20, 0xed, 0xf4, 0x0d, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0x36, 0x35, 0xc7, 0x03, + 0x63, 0x53, 0x1b, 0x0e, 0x4f, 0x6d, 0x3b, 0x91, 0x0b, 0xa7, 0x48, 0x33, 0x3f, 0x76, 0x4c, 0x5e, + 0x3c, 0x50, 0x54, 0x4d, 0xd9, 0xd1, 0x48, 0xb9, 0x18, 0xf6, 0xe3, 0x8d, 0x30, 0x19, 0x47, 0xf9, + 0xd1, 0x63, 0x98, 0x76, 0x86, 0xb6, 0x75, 0xc5, 0x03, 0x29, 0x71, 0x90, 0x9f, 0x09, 0x90, 0xe9, + 0x8d, 0x28, 0x03, 0x8e, 0xcb, 0xa0, 0x07, 0x30, 0xd1, 0x32, 0x34, 0x8d, 0xfb, 0xe3, 0x92, 0xd1, + 0xd3, 0x69, 0x79, 0x84, 0xa3, 0x20, 0x76, 0x1e, 0x97, 0x42, 0x14, 0x1c, 0xe1, 0x44, 0x04, 0xa0, + 0xe5, 0x26, 0x1c, 0xbb, 0x0c, 0x3c, 0x3e, 0xde, 0xca, 0x1a, 0x03, 0xbc, 0x54, 0xe5, 0xd7, 0x00, + 0xde, 0x90, 0x8d, 0x03, 0xc0, 0xf2, 0x3f, 0x4b, 0x30, 0x9f, 0x12, 0x3a, 0xd0, 0x3b, 0xa1, 0x14, + 0xfb, 0xab, 0x91, 0x14, 0x7b, 0x39, 0x45, 0x2c, 0x90, 0x67, 0x75, 0x18, 0xb7, 0xd8, 0xac, 0xf4, + 0x8e, 0xc3, 0x22, 0x62, 0xe4, 0xdd, 0x01, 0xd3, 0xc0, 0x41, 0x19, 0x3f, 0xe6, 0x4f, 0x1f, 0x1f, + 0x55, 0xc6, 0x43, 0x34, 0x1c, 0x86, 0x97, 0xff, 0x3c, 0x07, 0xb0, 0x4c, 0x4c, 0xcd, 0xe8, 0x77, + 0x89, 0x7e, 0x1e, 0x35, 0xd4, 0x66, 0xa8, 0x86, 0xba, 0x39, 0x68, 0x7b, 0x3c, 0xd3, 0x52, 0x8b, + 0xa8, 0xdf, 0x8c, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x96, 0x87, 0x19, 0x9f, + 0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x57, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca, 0xea, + 0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca, 0xcb, + 0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc5, 0x9a, 0xed, 0x2b, 0x09, + 0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x91, 0xd9, 0x45, 0x53, 0xaa, + 0xb6, 0xff, 0x66, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7, 0x43, + 0x9f, 0x4b, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0x83, 0x2a, 0x4e, 0xac, 0x74, 0xcc, 0x5a, 0xcd, + 0x6c, 0x96, 0xab, 0xb1, 0xba, 0x1d, 0xc3, 0x7a, 0xa4, 0x53, 0xab, 0xef, 0x6f, 0x72, 0x9c, 0x01, + 0x27, 
0x18, 0x80, 0x14, 0x00, 0x4b, 0x60, 0x6e, 0x19, 0xe2, 0x20, 0xdf, 0xcc, 0x10, 0xf3, 0x98, + 0xc0, 0x92, 0xa1, 0xef, 0xaa, 0x1d, 0x3f, 0xec, 0x60, 0x0f, 0x08, 0x07, 0x40, 0x2f, 0x3d, 0x82, + 0xf9, 0x14, 0x6b, 0xd1, 0x14, 0xe4, 0xf7, 0x49, 0xdf, 0x59, 0x36, 0xcc, 0xfe, 0x44, 0xb3, 0x30, + 0x74, 0xa0, 0x68, 0x3d, 0x27, 0xfc, 0x8e, 0x60, 0xe7, 0xc7, 0x83, 0xdc, 0x7d, 0x49, 0xfe, 0x72, + 0x28, 0xe8, 0x3b, 0xbc, 0x62, 0xbe, 0xce, 0x2e, 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42, + 0x63, 0xce, 0x85, 0xd5, 0x19, 0xc3, 0x1e, 0x35, 0x54, 0x5b, 0xe7, 0x5e, 0x6d, 0x6d, 0x9d, 0x7f, + 0x39, 0xb5, 0xf5, 0x6f, 0x43, 0xc9, 0x76, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8, 0x2a, + 0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 0xd0, 0x29, 0x8b, 0xe8, 0x97, + 0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x9b, 0xb4, 0x79, 0x6c, 0x2b, 0xf9, 0xf1, 0xa6, 0xc1, + 0x47, 0xb1, 0xa0, 0xa2, 0x8f, 0x42, 0x2e, 0x5b, 0x3a, 0x8b, 0xcb, 0x4e, 0xa4, 0xbb, 0x2b, 0xda, + 0x86, 0x79, 0xd3, 0x32, 0x3a, 0x16, 0xb1, 0xed, 0x65, 0xa2, 0xb4, 0x35, 0x55, 0x27, 0xee, 0xfa, + 0x38, 0x15, 0xd1, 0xe5, 0xe3, 0xa3, 0xca, 0x7c, 0x23, 0x99, 0x05, 0xa7, 0xc9, 0xca, 0xcf, 0x0b, + 0x30, 0x15, 0xcd, 0x80, 0x29, 0x45, 0xaa, 0x74, 0xa6, 0x22, 0xf5, 0x46, 0xe0, 0x30, 0x38, 0x15, + 0x7c, 0xe0, 0x05, 0x27, 0x76, 0x20, 0x16, 0x61, 0x52, 0x44, 0x03, 0x97, 0x28, 0xca, 0x74, 0x6f, + 0xf7, 0xb7, 0xc3, 0x64, 0x1c, 0xe5, 0x47, 0x0f, 0x61, 0xdc, 0xe2, 0x75, 0xb7, 0x0b, 0xe0, 0xd4, + 0xae, 0x17, 0x05, 0xc0, 0x38, 0x0e, 0x12, 0x71, 0x98, 0x97, 0xd5, 0xad, 0x7e, 0x39, 0xea, 0x02, + 0x14, 0xc2, 0x75, 0xeb, 0x62, 0x94, 0x01, 0xc7, 0x65, 0xd0, 0x3a, 0xcc, 0xf4, 0xf4, 0x38, 0x94, + 0xe3, 0xca, 0x97, 0x05, 0xd4, 0xcc, 0x76, 0x9c, 0x05, 0x27, 0xc9, 0xa1, 0xdd, 0x50, 0x29, 0x3b, + 0xcc, 0xc3, 0xf3, 0xed, 0xcc, 0x07, 0x2f, 0x73, 0x2d, 0x9b, 0x50, 0x6e, 0x97, 0xb2, 0x96, 0xdb, + 0xf2, 0x3f, 0x4a, 0xc1, 0x24, 0xe4, 0x95, 0xc0, 0x83, 0x5e, 0x99, 0x62, 0x12, 0x81, 0xea, 0xc8, + 0x48, 0xae, 0x7e, 0xef, 0x9d, 0xaa, 0xfa, 0xf5, 0x93, 0xe7, 0xe0, 0xf2, 0xf7, 0x0b, 0x09, 0xe6, + 0x56, 0x9a, 0x8f, 0x2d, 0xa3, 0x67, 0xba, 0xe6, 0x6c, 0x9a, 0xce, 0xd2, 0xfc, 0x02, 0x0a, 0x56, + 0x4f, 0x73, 0xe7, 0xf1, 0xba, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0x66, 0x22, 0x52, 0xce, 0x24, + 0x98, 0x00, 0xda, 0x80, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0xd7, 0x06, 0x58, 0xbf, 0xba, + 0x8c, 0x19, 0x7b, 0xa0, 0xb0, 0xe1, 0xd2, 0x58, 0xa0, 0xc8, 0x7f, 0x24, 0xc1, 0xe4, 0x93, 0xad, + 0xad, 0xc6, 0xaa, 0xce, 0x4f, 0x34, 0x7f, 0x5b, 0xbd, 0x0a, 0x05, 0x53, 0xa1, 0x7b, 0xd1, 0x4c, + 0xcf, 0x68, 0x98, 0x53, 0xd0, 0x07, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80, + 0xaf, 0x3b, 0x42, 0x7e, 0xf5, 0x24, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc3, 0x6c, 0xc0, 0x1c, 0xb6, + 0x1e, 0x4f, 0x59, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0x78, 0xcc, 0x8c, + 0x4c, 0xc9, 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x3a, 0x8c, 0xf3, 0x07, 0x65, 0xc3, + 0xa2, 0x7c, 0x59, 0xd0, 0x15, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39, + 0x82, 0x8d, 0x73, 0xb2, 0x72, 0x28, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x31, 0x1b, 0x97, 0x1f, 0x43, + 0x51, 0x2c, 0x77, 0x10, 0x28, 0x7f, 0x32, 0x50, 0x3e, 0x01, 0x68, 0x13, 0x8a, 0xab, 0x8d, 0xba, + 0x66, 0x38, 0x55, 0x57, 0x4b, 0x6d, 0x5b, 0xd1, 0xbd, 0x58, 0x5a, 0x5d, 0xc6, 0x98, 0x53, 0x90, + 0x0c, 0xc3, 0xe4, 0xb0, 0x45, 0x4c, 0xca, 0x3d, 0x62, 0xa4, 0x0e, 0x6c, 0x97, 0x1f, 0xf1, 0x11, + 0x2c, 0x28, 0xf2, 0x1f, 0xe7, 0xa0, 0x28, 0x96, 0xe3, 0x1c, 0x6e, 0x61, 0x6b, 0xa1, 0x5b, 0xd8, + 0x9b, 0xd9, 0x5c, 0x23, 0xf5, 
0x0a, 0xb6, 0x15, 0xb9, 0x82, 0xdd, 0xc8, 0x88, 0x77, 0xf2, 0xfd, + 0xeb, 0xef, 0x24, 0x98, 0x08, 0x3b, 0x25, 0xba, 0x0b, 0xa3, 0x2c, 0xe1, 0xa8, 0x2d, 0xb2, 0xe1, + 0xd7, 0xb9, 0xde, 0x23, 0x4c, 0xd3, 0x27, 0xe1, 0x20, 0x1f, 0xea, 0x78, 0x62, 0xcc, 0x8f, 0xc4, + 0xa4, 0xd3, 0x97, 0xb4, 0x47, 0x55, 0xad, 0xea, 0x7c, 0x5a, 0xa9, 0xae, 0xea, 0x74, 0xd3, 0x6a, + 0x52, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83, 0xc8, 0xf2, 0x3f, 0x48, 0x30, 0x2a, 0x4c, + 0x3e, 0x87, 0x5b, 0xc5, 0x6f, 0x84, 0x6f, 0x15, 0xd7, 0x32, 0x1e, 0xf0, 0xe4, 0x2b, 0xc5, 0x5f, + 0xf9, 0xa6, 0xb3, 0x23, 0xcd, 0xbc, 0x7a, 0xcf, 0xb0, 0x69, 0xd4, 0xab, 0xd9, 0x61, 0xc4, 0x9c, + 0x82, 0x7a, 0x30, 0xa5, 0x46, 0x62, 0x80, 0x58, 0xda, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 0x59, + 0xc0, 0x4f, 0x45, 0x29, 0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x43, 0x61, 0x8f, 0x52, + 0x33, 0xe1, 0xbd, 0x7a, 0x40, 0xe4, 0xf1, 0x4d, 0x28, 0xf1, 0xd9, 0x6d, 0x6d, 0x35, 0x30, 0x87, + 0x92, 0xff, 0xc7, 0x5f, 0x8f, 0xa6, 0xe3, 0xe3, 0x5e, 0x3c, 0x95, 0xce, 0x12, 0x4f, 0x47, 0x93, + 0x62, 0x29, 0x7a, 0x02, 0x79, 0xaa, 0x65, 0xbd, 0x16, 0x0a, 0xc4, 0xad, 0xb5, 0xa6, 0x1f, 0x90, + 0xb6, 0xd6, 0x9a, 0x98, 0x41, 0xa0, 0x4d, 0x18, 0x62, 0xd9, 0x87, 0x1d, 0xc1, 0x7c, 0xf6, 0x23, + 0xcd, 0xe6, 0xef, 0x3b, 0x04, 0xfb, 0x65, 0x63, 0x07, 0x47, 0xfe, 0x14, 0xc6, 0x43, 0xe7, 0x14, + 0x7d, 0x02, 0x63, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x7e, 0x1c, 0xb8, 0x96, + 0x74, 0xc3, 0x58, 0x0b, 0xf0, 0x89, 0x53, 0x3e, 0x2b, 0x94, 0x8c, 0x05, 0x69, 0x38, 0x84, 0x28, + 0x2b, 0x00, 0xfe, 0x1c, 0x51, 0x05, 0x86, 0x98, 0x9f, 0x39, 0xf9, 0x64, 0xa4, 0x3e, 0xc2, 0x2c, + 0x64, 0xee, 0x67, 0x63, 0x67, 0x1c, 0xdd, 0x06, 0xb0, 0x49, 0xcb, 0x22, 0x94, 0x07, 0x83, 0x5c, + 0xf8, 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x4f, 0x12, 0x8c, 0x6f, 0x10, 0xfa, 0x99, + 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0xd8, 0xe2, 0x50, 0xb0, 0x7d, 0x6b, 0xc0, + 0xce, 0x84, 0xac, 0x4b, 0x0b, 0xb9, 0xf2, 0x57, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x47, 0x77, + 0x1b, 0x86, 0x4c, 0xc3, 0xa2, 0x6e, 0x22, 0x3e, 0x95, 0x42, 0x16, 0xc6, 0x02, 0xa9, 0x98, 0xc1, + 0x60, 0x07, 0x0d, 0xad, 0x41, 0x8e, 0x1a, 0xc2, 0x55, 0x4f, 0x87, 0x49, 0x88, 0x55, 0x07, 0x81, + 0x99, 0xdb, 0x32, 0x70, 0x8e, 0x1a, 0x6c, 0x23, 0xca, 0x21, 0xae, 0x60, 0xf0, 0x79, 0x45, 0x33, + 0xc0, 0x50, 0xd8, 0xb5, 0x8c, 0xee, 0x99, 0xe7, 0xe0, 0x6d, 0xc4, 0x8a, 0x65, 0x74, 0x31, 0xc7, + 0x92, 0xbf, 0x96, 0x60, 0x3a, 0xc4, 0x79, 0x0e, 0x81, 0xff, 0xfd, 0x70, 0xe0, 0xbf, 0x71, 0x9a, + 0x89, 0xa4, 0x84, 0xff, 0xaf, 0x73, 0x91, 0x69, 0xb0, 0x09, 0xa3, 0x5d, 0x18, 0x35, 0x8d, 0x76, + 0xf3, 0x25, 0x7c, 0x0e, 0x9c, 0x64, 0x79, 0xb3, 0xe1, 0x63, 0xe1, 0x20, 0x30, 0x3a, 0x84, 0x69, + 0x5d, 0xe9, 0x12, 0xdb, 0x54, 0x5a, 0xa4, 0xf9, 0x12, 0x1e, 0x48, 0x2e, 0xf2, 0xef, 0x0d, 0x51, + 0x44, 0x1c, 0x57, 0x82, 0xd6, 0xa1, 0xa8, 0x9a, 0xbc, 0x8e, 0x13, 0xb5, 0xcb, 0xc0, 0x2c, 0xea, + 0x54, 0x7d, 0x4e, 0x3c, 0x17, 0x3f, 0xb0, 0x8b, 0x21, 0xff, 0x75, 0xd4, 0x1b, 0x98, 0xff, 0xa1, + 0xc7, 0x50, 0xe2, 0x8d, 0x19, 0x2d, 0x43, 0x73, 0xbf, 0x0c, 0xb0, 0x9d, 0x6d, 0x88, 0xb1, 0x17, + 0x47, 0x95, 0xcb, 0x09, 0x8f, 0xbe, 0x2e, 0x19, 0x7b, 0xc2, 0x68, 0x03, 0x0a, 0xe6, 0x8f, 0xa9, + 0x60, 0x78, 0x92, 0xe3, 0x65, 0x0b, 0xc7, 0x91, 0x7f, 0x2f, 0x1f, 0x31, 0x97, 0xa7, 0xba, 0x67, + 0x2f, 0x6d, 0xd7, 0xbd, 0x8a, 0x29, 0x75, 0xe7, 0x77, 0xa0, 0x28, 0x32, 0xbc, 0x70, 0xe6, 0x5f, + 0x9c, 0xc6, 0x99, 0x83, 0x59, 0xcc, 0xbb, 0xb0, 0xb8, 0x83, 0x2e, 0x30, 0xfa, 0x18, 0x86, 0x89, + 0xa3, 0xc2, 0xc9, 0x8d, 0xf7, 0x4e, 0xa3, 0xc2, 0x8f, 
0xab, 0x7e, 0xa1, 0x2a, 0xc6, 0x04, 0x2a, + 0x7a, 0x87, 0xad, 0x17, 0xe3, 0x65, 0x97, 0x40, 0xbb, 0x5c, 0xe0, 0xe9, 0xea, 0x8a, 0x33, 0x6d, + 0x6f, 0xf8, 0xc5, 0x51, 0x05, 0xfc, 0x9f, 0x38, 0x28, 0x21, 0xff, 0x8b, 0x04, 0xd3, 0x7c, 0x85, + 0x5a, 0x3d, 0x4b, 0xa5, 0xfd, 0x73, 0x4b, 0x4c, 0x4f, 0x43, 0x89, 0xe9, 0xce, 0x80, 0x65, 0x89, + 0x59, 0x98, 0x9a, 0x9c, 0xbe, 0x91, 0xe0, 0x62, 0x8c, 0xfb, 0x1c, 0xe2, 0xe2, 0x76, 0x38, 0x2e, + 0xbe, 0x75, 0xda, 0x09, 0xa5, 0xc4, 0xc6, 0xff, 0x9a, 0x4e, 0x98, 0x0e, 0x3f, 0x29, 0xb7, 0x01, + 0x4c, 0x4b, 0x3d, 0x50, 0x35, 0xd2, 0x11, 0x1f, 0xc1, 0x4b, 0x81, 0x16, 0x27, 0x8f, 0x82, 0x03, + 0x5c, 0xc8, 0x86, 0xb9, 0x36, 0xd9, 0x55, 0x7a, 0x1a, 0x5d, 0x6c, 0xb7, 0x97, 0x14, 0x53, 0xd9, + 0x51, 0x35, 0x95, 0xaa, 0xe2, 0xb9, 0x60, 0xa4, 0xfe, 0xd0, 0xf9, 0x38, 0x9d, 0xc4, 0xf1, 0xe2, + 0xa8, 0x72, 0x25, 0xe9, 0xeb, 0x90, 0xcb, 0xd2, 0xc7, 0x29, 0xd0, 0xa8, 0x0f, 0x65, 0x8b, 0x7c, + 0xda, 0x53, 0x2d, 0xd2, 0x5e, 0xb6, 0x0c, 0x33, 0xa4, 0x36, 0xcf, 0xd5, 0xfe, 0xfa, 0xf1, 0x51, + 0xa5, 0x8c, 0x53, 0x78, 0x06, 0x2b, 0x4e, 0x85, 0x47, 0xcf, 0x60, 0x46, 0x11, 0xcd, 0x68, 0x41, + 0xad, 0xce, 0x29, 0xb9, 0x7f, 0x7c, 0x54, 0x99, 0x59, 0x8c, 0x93, 0x07, 0x2b, 0x4c, 0x02, 0x45, + 0x35, 0x28, 0x1e, 0xf0, 0xbe, 0x35, 0xbb, 0x3c, 0xc4, 0xf1, 0x59, 0x22, 0x28, 0x3a, 0xad, 0x6c, + 0x0c, 0x73, 0x78, 0xa5, 0xc9, 0x4f, 0x9f, 0xcb, 0xc5, 0x2e, 0x94, 0xac, 0x96, 0x14, 0x27, 0x9e, + 0xbf, 0x18, 0x97, 0xfc, 0xa8, 0xf5, 0xc4, 0x27, 0xe1, 0x20, 0x1f, 0xfa, 0x08, 0x46, 0xf6, 0xc4, + 0xab, 0x84, 0x5d, 0x2e, 0x66, 0x4a, 0xc2, 0xa1, 0x57, 0x8c, 0xfa, 0xb4, 0x50, 0x31, 0xe2, 0x0e, + 0xdb, 0xd8, 0x47, 0x44, 0x6f, 0x40, 0x91, 0xff, 0x58, 0x5d, 0xe6, 0xcf, 0x71, 0x25, 0x3f, 0xb6, + 0x3d, 0x71, 0x86, 0xb1, 0x4b, 0x77, 0x59, 0x57, 0x1b, 0x4b, 0xfc, 0x59, 0x38, 0xc2, 0xba, 0xda, + 0x58, 0xc2, 0x2e, 0x1d, 0x7d, 0x02, 0x45, 0x9b, 0xac, 0xa9, 0x7a, 0xef, 0xb0, 0x0c, 0x99, 0x3e, + 0x2a, 0x37, 0x1f, 0x71, 0xee, 0xc8, 0xc3, 0x98, 0xaf, 0x41, 0xd0, 0xb1, 0x0b, 0x8b, 0xf6, 0x60, + 0xc4, 0xea, 0xe9, 0x8b, 0xf6, 0xb6, 0x4d, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97, + 0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x6f, 0x70, + 0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x4f, 0x7d, 0xe2, 0x1d, 0xde, 0x23, 0xe3, 0x00, + 0x36, 0xfa, 0x43, 0x09, 0x90, 0xdd, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x4e, 0x15, 0x8d, 0x8f, 0xda, + 0xe5, 0x31, 0xae, 0xf2, 0xdd, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0x9e, 0xd7, 0xe3, 0xac, + 0x38, 0x41, 0x2f, 0xdb, 0xc4, 0x5d, 0x31, 0xeb, 0xf1, 0x4c, 0x9b, 0x98, 0xfc, 0xba, 0xe9, 0x6f, + 0xa2, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x85, 0x39, 0xb7, 0xc1, 0x12, 0x1b, 0x06, 0x5d, 0x51, 0x35, + 0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x09, 0xee, 0x60, 0x5e, 0x97, 0x09, 0x4e, 0xe4, 0xc2, 0x29, + 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x38, 0xb1, 0x93, 0xeb, 0x45, 0xc7, 0x47, 0x76, 0x4b, 0xd1, 0x9c, + 0x2f, 0x0e, 0x93, 0x5c, 0xc1, 0xeb, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16, + 0xfa, 0x00, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0x35, 0x16, 0xf1, 0x52, 0x15, 0xa4, 0x4a, + 0x23, 0x0a, 0x53, 0x4a, 0xb8, 0xd5, 0xd5, 0x2e, 0x4f, 0x67, 0x7a, 0xf2, 0x8c, 0x74, 0xc8, 0xfa, + 0xcf, 0x1e, 0x11, 0x82, 0x8d, 0x63, 0x1a, 0xd0, 0xef, 0x00, 0x52, 0xa2, 0xdd, 0xb9, 0x76, 0x19, + 0x65, 0x4a, 0x74, 0xb1, 0xb6, 0x5e, 0xdf, 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x61, 0x05, 0xba, + 0x12, 0xe9, 0x28, 0xb6, 0xcb, 0xf3, 0x5c, 0x79, 0x2d, 0x9b, 0x72, 0x4f, 0x2e, 0xf0, 0x61, 0x25, + 0x8a, 0x88, 0xe3, 0x4a, 0xd0, 0x1a, 0xcc, 0x8a, 0xc1, 0x6d, 0xdd, 0x56, 0x76, 
0x49, 0xb3, 0x6f, + 0xb7, 0xa8, 0x66, 0x97, 0x67, 0x78, 0x7c, 0xe7, 0x1f, 0xf7, 0x16, 0x13, 0xe8, 0x38, 0x51, 0x0a, + 0xbd, 0x0b, 0x53, 0xbb, 0x86, 0xb5, 0xa3, 0xb6, 0xdb, 0x44, 0x77, 0x91, 0x66, 0x39, 0xd2, 0x2c, + 0xdb, 0x87, 0x95, 0x08, 0x0d, 0xc7, 0xb8, 0x91, 0x0d, 0x17, 0x05, 0x72, 0xc3, 0x32, 0x5a, 0xeb, + 0x46, 0x4f, 0xa7, 0x4e, 0xd9, 0x77, 0xd1, 0x4b, 0xa3, 0x17, 0x17, 0x93, 0x18, 0x5e, 0x1c, 0x55, + 0xae, 0x26, 0x57, 0xf9, 0x3e, 0x13, 0x4e, 0xc6, 0x46, 0x26, 0x8c, 0x89, 0x3e, 0xf1, 0x25, 0x4d, + 0xb1, 0xed, 0x72, 0x99, 0x1f, 0xfd, 0x07, 0x83, 0x03, 0x9e, 0x27, 0x12, 0x3d, 0xff, 0x53, 0xc7, + 0x47, 0x95, 0xb1, 0x20, 0x03, 0x0e, 0x69, 0xe0, 0x7d, 0x41, 0xe2, 0x6b, 0xd4, 0xf9, 0xf4, 0x56, + 0x9f, 0xae, 0x2f, 0xc8, 0x37, 0xed, 0xa5, 0xf5, 0x05, 0x05, 0x20, 0x4f, 0x7e, 0x97, 0xfe, 0xcf, + 0x1c, 0xcc, 0xf8, 0xcc, 0x99, 0xfb, 0x82, 0x12, 0x44, 0xfe, 0xbf, 0xbf, 0x3a, 0x5b, 0xaf, 0x8e, + 0xbf, 0x74, 0xff, 0xf7, 0x7a, 0x75, 0x7c, 0xdb, 0x52, 0x6e, 0x0f, 0x7f, 0x9b, 0x0b, 0x4e, 0xe0, + 0x94, 0x0d, 0x23, 0x2f, 0xa1, 0xc5, 0xf8, 0x27, 0xd7, 0x73, 0x22, 0x7f, 0x93, 0x87, 0xa9, 0xe8, + 0x69, 0x0c, 0xf5, 0x15, 0x48, 0x03, 0xfb, 0x0a, 0x1a, 0x30, 0xbb, 0xdb, 0xd3, 0xb4, 0x3e, 0x9f, + 0x43, 0xa0, 0xb9, 0xc0, 0xf9, 0x2e, 0xf8, 0x9a, 0x90, 0x9c, 0x5d, 0x49, 0xe0, 0xc1, 0x89, 0x92, + 0xf1, 0x36, 0x83, 0xc2, 0x8f, 0x6d, 0x33, 0x18, 0x3a, 0x43, 0x9b, 0x41, 0x72, 0xa7, 0x46, 0xfe, + 0x4c, 0x9d, 0x1a, 0x67, 0xe9, 0x31, 0x48, 0x08, 0x62, 0x03, 0xfb, 0x65, 0x5f, 0x83, 0x4b, 0x42, + 0x8c, 0xf2, 0xde, 0x01, 0x9d, 0x5a, 0x86, 0xa6, 0x11, 0x6b, 0xb9, 0xd7, 0xed, 0xf6, 0xe5, 0x5f, + 0xc2, 0x44, 0xb8, 0x2b, 0xc6, 0xd9, 0x69, 0xa7, 0x31, 0x47, 0x7c, 0x9d, 0x0d, 0xec, 0xb4, 0x33, + 0x8e, 0x3d, 0x0e, 0xf9, 0xf7, 0x25, 0x98, 0x4b, 0xee, 0x7e, 0x45, 0x1a, 0x4c, 0x74, 0x95, 0xc3, + 0x60, 0x47, 0xb2, 0x74, 0xc6, 0x77, 0x33, 0xde, 0x0e, 0xb1, 0x1e, 0xc2, 0xc2, 0x11, 0x6c, 0xf9, + 0x07, 0x09, 0xe6, 0x53, 0x1a, 0x11, 0xce, 0xd7, 0x12, 0xf4, 0x21, 0x94, 0xba, 0xca, 0x61, 0xb3, + 0x67, 0x75, 0xc8, 0x99, 0x5f, 0x0a, 0xf9, 0x71, 0x5f, 0x17, 0x28, 0xd8, 0xc3, 0x93, 0xff, 0x52, + 0x82, 0x9f, 0xa5, 0x5e, 0xa4, 0xd0, 0xbd, 0x50, 0xcf, 0x84, 0x1c, 0xe9, 0x99, 0x40, 0x71, 0xc1, + 0x57, 0xd4, 0x32, 0xf1, 0x85, 0x04, 0xe5, 0xb4, 0x9b, 0x25, 0xba, 0x1b, 0x32, 0xf2, 0xe7, 0x11, + 0x23, 0xa7, 0x63, 0x72, 0xaf, 0xc8, 0xc6, 0x7f, 0x95, 0xe0, 0xf2, 0x09, 0x15, 0x9a, 0x77, 0x81, + 0x21, 0xed, 0x20, 0x17, 0x7f, 0xd4, 0x16, 0x5f, 0xc4, 0xfc, 0x0b, 0x4c, 0x02, 0x0f, 0x4e, 0x95, + 0x46, 0xdb, 0x30, 0x2f, 0x6e, 0x4f, 0x51, 0x9a, 0x28, 0x3e, 0x78, 0x6b, 0xd9, 0x72, 0x32, 0x0b, + 0x4e, 0x93, 0x95, 0xff, 0x46, 0x82, 0xb9, 0xe4, 0x27, 0x03, 0xf4, 0x76, 0x68, 0xc9, 0x2b, 0x91, + 0x25, 0x9f, 0x8c, 0x48, 0x89, 0x05, 0xff, 0x18, 0x26, 0xc4, 0xc3, 0x82, 0x80, 0x11, 0xce, 0x2c, + 0x27, 0xe5, 0x17, 0x01, 0xe1, 0x96, 0xb7, 0xfc, 0x98, 0x84, 0xc7, 0x70, 0x04, 0x4d, 0xfe, 0x83, + 0x1c, 0x0c, 0x35, 0x5b, 0x8a, 0x46, 0xce, 0xa1, 0xba, 0x7d, 0x2f, 0x54, 0xdd, 0x0e, 0xfa, 0xa7, + 0x2d, 0x6e, 0x55, 0x6a, 0x61, 0x8b, 0x23, 0x85, 0xed, 0x9b, 0x99, 0xd0, 0x4e, 0xae, 0x69, 0x7f, + 0x0d, 0x46, 0x3c, 0xa5, 0xa7, 0x4b, 0xb5, 0xf2, 0x5f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, 0x94, 0x89, + 0x7a, 0x37, 0x54, 0x9d, 0xe4, 0x33, 0x3c, 0xe3, 0x04, 0x74, 0x55, 0xdd, 0x7a, 0xc4, 0x69, 0x3a, + 0xf6, 0xdb, 0x4c, 0xe3, 0x65, 0xca, 0x2f, 0x61, 0x82, 0x2a, 0x56, 0x87, 0x50, 0xef, 0xb3, 0x46, + 0x9e, 0xfb, 0xa2, 0xd7, 0xfd, 0xbe, 0x15, 0xa2, 0xe2, 0x08, 0xf7, 0xa5, 0x87, 0x30, 0x1e, 0x52, + 0x76, 0xaa, 0x9e, 0xe1, 0xbf, 0x97, 0xe0, 0xe7, 0x03, 0x9f, 0x82, 0x50, 0x3d, 0x74, 0x48, 0xaa, + 0x91, 
0x43, 0xb2, 0x90, 0x0e, 0xf0, 0xea, 0x7a, 0xcf, 0xea, 0x37, 0x9f, 0x7f, 0xbf, 0x70, 0xe1, + 0xdb, 0xef, 0x17, 0x2e, 0x7c, 0xf7, 0xfd, 0xc2, 0x85, 0xdf, 0x3d, 0x5e, 0x90, 0x9e, 0x1f, 0x2f, + 0x48, 0xdf, 0x1e, 0x2f, 0x48, 0xdf, 0x1d, 0x2f, 0x48, 0xff, 0x7e, 0xbc, 0x20, 0xfd, 0xc9, 0x0f, + 0x0b, 0x17, 0x3e, 0x2c, 0x0a, 0xb8, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x62, 0xda, 0xf9, + 0x07, 0x3e, 0x00, 0x00, +} + +func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - return i, nil + return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { +func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, 
error) { - var i int +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n24, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := 
m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HostPortRange) Marshal() (dAtA []byte, err error) { +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil -} - -func (m *IDRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil -} - -func (m *IPBlock) Marshal() (dAtA []byte, err error) { +func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) - if len(m.Except) > 0 { - for _, s := range m.Except { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Ingress) Marshal() (dAtA []byte, err error) { +func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n25, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 } - i += n25 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n26, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n27, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n27 - return i, nil + return len(dAtA) - i, nil } -func (m *IngressBackend) Marshal() (dAtA []byte, err error) { +func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n28, err := m.ServicePort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 } - i += n28 - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *IngressList) Marshal() (dAtA []byte, err error) { +func (m 
*DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n29, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *IngressRule) Marshal() (dAtA []byte, err error) { +func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 - return i, nil -} - -func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HTTP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n31, err := m.HTTP.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *IngressSpec) Marshal() (dAtA []byte, err error) { +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Backend != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n32, err := m.Backend.MarshalTo(dAtA[i:]) + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n32 - } - if len(m.TLS) > 0 { - for _, msg := range m.TLS { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x3a + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *IngressStatus) Marshal() (dAtA []byte, err error) { +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n33 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *IngressTLS) Marshal() (dAtA []byte, err error) { +func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != 
nil { return nil, err } return dAtA[:n], nil } -func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.UpdatedAnnotations) > 0 { + keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) + for k := range m.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n35, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - return i, nil -} - -func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 + } + if m.RollbackTo != nil { 
+ { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } -func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { +func 
(m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n36, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { +func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n37, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n39, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n39 } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { +func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } - if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n40, err := m.Port.MarshalTo(dAtA[i:]) + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n40 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n41, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Egress) > 0 { - for _, msg := range m.Egress { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { +func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n43, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { +func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n44, err := m.ListMeta.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *IPBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n44 - if len(m.Items) > 0 { - for _, msg := range m.Items { + return dAtA[:n], nil +} + +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n } } - return i, nil + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += 
copy(dAtA[i:], s) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x40 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x48 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n45, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n47 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n48, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n49 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n50, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n50 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n51, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n51 - return i, nil + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { +func (m *IngressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n52 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { +func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetList) 
MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n53, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n53 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { +func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n54, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n54 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n55, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n55 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { +func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 
0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.HTTP != nil { + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { +func (m *IngressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { +func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil -} - -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.TLS) > 0 { + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IngressStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n56 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { +func (m 
*IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n57 - } - if m.MaxSurge != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0xa } - i += n58 } - return i, nil + return len(dAtA) - i, nil } -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { +func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { +func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += 
copy(dAtA[i:], m.Rule) - if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n59 } - return i, nil + return len(dAtA) - i, nil } -func (m *Scale) Marshal() (dAtA []byte, err error) { +func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n60 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n61, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n61 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n62, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i += n62 - return i, nil + return len(dAtA) - i, nil } -func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { +func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { +func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x1a + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { +func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 
0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AllowedFlexVolume) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - return n + return len(dAtA) - i, nil } -func (m *AllowedHostPath) Size() (n int) { - var l int - _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *CustomMetricCurrentStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CustomMetricCurrentStatusList) Size() (n int) { +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Egress) > 0 { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicy) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = 
i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleSpec) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *CustomMetricTarget) Size() (n int) { +func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *CustomMetricTargetList) Size() (n int) { +func (m *AllowedFlexVolume) 
Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 } + var l int + _ = l + l = len(m.PathPrefix) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -2938,6 +4863,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2954,6 +4882,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2968,6 +4899,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -2987,6 +4921,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -3010,6 +4947,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3022,6 +4962,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3034,6 +4977,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3052,6 +4998,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3066,6 +5015,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -3084,6 +5036,9 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -3113,6 +5068,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -3134,6 +5092,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3146,6 +5107,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3160,6 +5124,9 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -3170,6 +5137,9 @@ func (m *HTTPIngressPath) Size() (n int) { } func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -3182,6 +5152,9 @@ func (m *HTTPIngressRuleValue) Size() (n int) { } func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -3190,6 +5163,9 @@ func (m *HostPortRange) Size() (n int) { } func (m 
*IDRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -3198,6 +5174,9 @@ func (m *IDRange) Size() (n int) { } func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.CIDR) @@ -3212,6 +5191,9 @@ func (m *IPBlock) Size() (n int) { } func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3224,6 +5206,9 @@ func (m *Ingress) Size() (n int) { } func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ServiceName) @@ -3234,6 +5219,9 @@ func (m *IngressBackend) Size() (n int) { } func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3248,6 +5236,9 @@ func (m *IngressList) Size() (n int) { } func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Host) @@ -3258,6 +5249,9 @@ func (m *IngressRule) Size() (n int) { } func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HTTP != nil { @@ -3268,6 +5262,9 @@ func (m *IngressRuleValue) Size() (n int) { } func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Backend != nil { @@ -3290,6 +5287,9 @@ func (m *IngressSpec) Size() (n int) { } func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LoadBalancer.Size() @@ -3298,6 +5298,9 @@ func (m *IngressStatus) Size() (n int) { } func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Hosts) > 0 { @@ -3312,6 +5315,9 @@ func (m *IngressTLS) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3322,6 +5328,9 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -3340,6 +5349,9 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -3358,6 +5370,9 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3372,6 +5387,9 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PodSelector != nil { @@ -3390,6 +5408,9 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Protocol != nil { @@ -3404,6 +5425,9 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.PodSelector.Size() @@ -3430,6 +5454,9 @@ func (m *NetworkPolicySpec) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3440,6 +5467,9 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3454,6 +5484,9 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -3535,10 +5568,27 @@ func (m *PodSecurityPolicySpec) Size() 
(n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedCSIDrivers) > 0 { + for _, e := range m.AllowedCSIDrivers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3551,6 +5601,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3567,6 +5620,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3581,6 +5637,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -3597,6 +5656,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3614,12 +5676,18 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *ReplicationControllerDummy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -3627,6 +5695,9 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -3637,6 +5708,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -3650,7 +5724,27 @@ func (m *RollingUpdateDeployment) Size() (n int) { return n } +func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3664,7 +5758,29 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3677,6 +5793,9 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3689,6 +5808,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3696,6 +5818,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + 
sovGenerated(uint64(m.Replicas)) @@ -3713,6 +5838,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3727,77 +5855,38 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CustomMetricCurrentStatus) String() string { +func (this *AllowedCSIDriver) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CustomMetricCurrentStatus{`, + s := strings.Join([]string{`&AllowedCSIDriver{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomMetricCurrentStatusList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricCurrentStatusList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricCurrentStatus", "CustomMetricCurrentStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *CustomMetricTarget) String() string { +func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CustomMetricTarget{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `}`, }, "") return s } -func (this *CustomMetricTargetList) String() string { +func (this *AllowedHostPath) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&CustomMetricTargetList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricTarget", "CustomMetricTarget", 1), `&`, ``, 1) + `,`, + s := strings.Join([]string{`&AllowedHostPath{`, + `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, `}`, }, "") return s @@ -3807,7 +5896,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3821,7 +5910,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3832,9 +5921,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3844,8 +5938,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, @@ -3858,6 +5952,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -3868,7 +5967,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 
1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -3879,7 +5978,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -3889,7 +5988,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3905,8 +6004,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3915,9 +6014,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3950,13 +6054,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -3966,13 +6070,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -3985,7 +6094,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -3994,9 +6103,14 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -4016,8 +6130,13 @@ func (this *HTTPIngressRuleValue) String() string { if this == nil { return "nil" } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + 
`,`, + `Paths:` + repeatedStringForPaths + `,`, `}`, }, "") return s @@ -4060,7 +6179,7 @@ func (this *Ingress) String() string { return "nil" } s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4073,7 +6192,7 @@ func (this *IngressBackend) String() string { } s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4082,9 +6201,14 @@ func (this *IngressList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4105,7 +6229,7 @@ func (this *IngressRuleValue) String() string { return "nil" } s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, `}`, }, "") return s @@ -4114,10 +6238,20 @@ func (this *IngressSpec) String() string { if this == nil { return "nil" } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + 
repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -4127,7 +6261,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4148,7 +6282,7 @@ func (this *NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4158,9 +6292,19 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, `}`, }, "") return s @@ -4169,9 +6313,19 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, `}`, }, "") return s @@ -4180,9 +6334,14 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," 
+ } + repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4192,9 +6351,9 @@ func (this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -4205,7 +6364,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4214,10 +6373,20 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -4228,7 +6397,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := 
strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4238,9 +6407,14 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4249,6 +6423,26 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -4256,7 +6450,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), 
`&`, ``, 1) + `,`, @@ -4266,11 +6460,14 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -4280,7 +6477,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4294,7 +6491,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -4305,9 +6502,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + 
repeatedStringForItems + `,`, `}`, }, "") return s @@ -4318,8 +6520,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -4329,13 +6531,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -4364,7 +6571,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4374,276 +6581,135 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s } -func (this *RunAsUserStrategyOptions) String() string { +func (this *RunAsGroupStrategyOptions) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" + repeatedStringForRanges := 
"[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s } -func (this *Scale) String() string { +func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s } -func (this *ScaleStatus) String() string { +func (this *RuntimeClassStrategyOptions) String() string { if this == nil { return "nil" } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, `}`, }, "") return s } -func (this *SupplementalGroupsStrategyOptions) String() string { +func (this *SELinuxStrategyOptions) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m 
*AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } +func (this *Scale) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s } -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := 
iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) } - return nil + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s } -func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4658,7 +6724,7 @@ func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4666,10 +6732,10 @@ func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4686,7 +6752,7 @@ func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4696,121 +6762,13 @@ func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricCurrentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + return io.ErrUnexpectedEOF } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4821,6 +6779,9 @@ func (m *CustomMetricCurrentStatusList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4833,7 +6794,7 @@ func (m *CustomMetricCurrentStatusList) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4848,7 +6809,7 @@ func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4856,15 +6817,15 @@ func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if 
wireType == 4 { - return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4876,7 +6837,7 @@ func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4886,40 +6847,13 @@ func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4930,6 +6864,9 @@ func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4942,7 +6879,7 @@ func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { +func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4957,7 +6894,7 @@ func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4965,17 +6902,17 @@ func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4985,23 +6922,44 @@ func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
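
Every hunk in this regenerated file restyles the same base-128 varint loop (dropping the redundant parentheses in "wire |= (uint64(b) & 0x7F) << shift"). For readers of the diff, here is a minimal, self-contained sketch of that decode loop; decodeVarint, errIntOverflow and errUnexpectedEOF are illustrative names for this sketch, not identifiers from the vendored file.

package main

import (
    "errors"
    "fmt"
)

var (
    errIntOverflow   = errors.New("varint overflows 64 bits")
    errUnexpectedEOF = errors.New("unexpected end of buffer")
)

// decodeVarint reads one base-128 varint starting at iNdEx, mirroring the
// loop emitted by gogo/protobuf: each byte contributes its low 7 bits and
// the high bit marks continuation.
func decodeVarint(dAtA []byte, iNdEx int) (v uint64, next int, err error) {
    l := len(dAtA)
    for shift := uint(0); ; shift += 7 {
        if shift >= 64 {
            return 0, iNdEx, errIntOverflow
        }
        if iNdEx >= l {
            return 0, iNdEx, errUnexpectedEOF
        }
        b := dAtA[iNdEx]
        iNdEx++
        v |= uint64(b&0x7F) << shift
        if b < 0x80 {
            break
        }
    }
    return v, iNdEx, nil
}

func main() {
    // 0x96 0x01 encodes 150: 0x16 | (0x01 << 7).
    v, _, _ := decodeVarint([]byte{0x96, 0x01}, 0)
    fmt.Println(v) // 150
}
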
- msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, CustomMetricTarget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PathPrefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5011,6 +6969,9 @@ func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5038,7 +6999,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5066,7 +7027,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5075,6 +7036,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5096,7 +7060,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5105,6 +7069,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5126,7 +7093,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5135,6 +7102,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5151,6 +7121,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5178,7 +7151,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5206,7 +7179,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5216,6 +7189,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) 
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5235,7 +7211,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5245,6 +7221,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5264,7 +7243,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5273,6 +7252,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5294,7 +7276,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5304,6 +7286,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5323,7 +7308,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5333,6 +7318,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5347,6 +7335,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5374,7 +7365,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5402,7 +7393,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5411,6 +7402,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5432,7 +7426,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5441,6 +7435,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5458,6 +7455,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + 
return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5485,7 +7485,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5513,7 +7513,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5522,11 +7522,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5546,7 +7549,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5555,6 +7558,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5576,7 +7582,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5585,6 +7591,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5606,7 +7615,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5625,7 +7634,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TemplateGeneration |= (int64(b) & 0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5644,7 +7653,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5659,6 +7668,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5686,7 +7698,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5714,7 +7726,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5733,7 +7745,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5752,7 +7764,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + 
m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5771,7 +7783,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5790,7 +7802,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5809,7 +7821,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5828,7 +7840,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5847,7 +7859,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5866,7 +7878,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5886,7 +7898,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5895,6 +7907,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5912,6 +7927,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5939,7 +7957,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5967,7 +7985,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5977,6 +7995,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5996,7 +8017,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6005,6 +8026,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6024,6 +8048,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6051,7 +8078,7 @@ func (m 
*Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6079,7 +8106,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6088,6 +8115,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6109,7 +8139,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6118,6 +8148,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6139,7 +8172,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6148,6 +8181,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6164,6 +8200,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6191,7 +8230,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6219,7 +8258,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6229,6 +8268,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6248,7 +8290,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6258,6 +8300,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6277,7 +8322,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6287,6 +8332,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6306,7 +8354,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -6316,6 +8364,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6335,7 +8386,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6344,6 +8395,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6365,7 +8419,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6374,6 +8428,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6390,6 +8447,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6417,7 +8477,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6445,7 +8505,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6454,6 +8514,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6475,7 +8538,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6484,6 +8547,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6501,6 +8567,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6528,7 +8597,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6556,7 +8625,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6566,6 +8635,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6585,7 +8657,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6594,54 +8666,20 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6651,41 +8689,86 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } 
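
The UpdatedAnnotations rewrite in this hunk replaces the old fixed key-then-value read with a loop over the map-entry bytes that decodes each tag and dispatches on its field number (1 = key, 2 = value). A simplified sketch of that shape, reusing the decodeVarint helper from the earlier sketch; parseStringMapEntry is an illustrative name, and the generated code additionally falls back to skipGenerated for unexpected fields, which is omitted here.

// parseStringMapEntry decodes one map<string,string> entry from
// dAtA[iNdEx:postIndex]. Only length-delimited fields are handled; the
// generated code instead skips anything it does not recognize.
func parseStringMapEntry(dAtA []byte, iNdEx, postIndex int) (key, value string, err error) {
    for iNdEx < postIndex {
        wire, next, err := decodeVarint(dAtA, iNdEx)
        if err != nil {
            return "", "", err
        }
        iNdEx = next
        fieldNum := int32(wire >> 3)
        if wireType := int(wire & 0x7); wireType != 2 {
            return "", "", fmt.Errorf("unhandled wire type %d in map entry", wireType)
        }
        strLen, next, err := decodeVarint(dAtA, iNdEx)
        if err != nil {
            return "", "", err
        }
        iNdEx = next
        end := iNdEx + int(strLen)
        if end < iNdEx || end > postIndex {
            return "", "", errUnexpectedEOF
        }
        s := string(dAtA[iNdEx:end])
        iNdEx = end
        switch fieldNum {
        case 1:
            key = s
        case 2:
            value = s
        }
    }
    return key, value, nil
}
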
+ if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -6701,7 +8784,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6710,6 +8793,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6726,6 +8812,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6753,7 +8842,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6781,7 +8870,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6801,7 +8890,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6810,11 +8899,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6834,7 +8926,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6843,6 +8935,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6864,7 +8959,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6873,6 +8968,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6894,7 +8992,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { 
break } @@ -6913,7 +9011,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6933,7 +9031,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6953,7 +9051,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6962,6 +9060,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6986,7 +9087,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7001,6 +9102,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7028,7 +9132,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7056,7 +9160,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7075,7 +9179,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7094,7 +9198,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7113,7 +9217,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7132,7 +9236,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7151,7 +9255,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7160,6 +9264,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7182,7 +9289,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7201,7 +9308,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7216,6 +9323,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if 
skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7243,7 +9353,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7271,7 +9381,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7281,6 +9391,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7300,7 +9413,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7309,6 +9422,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7328,6 +9444,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7355,7 +9474,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7383,7 +9502,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7393,6 +9512,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7412,7 +9534,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7421,6 +9543,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7438,6 +9563,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7465,7 +9593,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7493,7 +9621,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7503,6 +9631,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7522,7 +9653,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7531,6 +9662,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7544,7 +9678,10 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -7574,7 +9711,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7602,7 +9739,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7611,6 +9748,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7628,6 +9768,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7655,7 +9798,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7683,7 +9826,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + m.Min |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7702,7 +9845,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + m.Max |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7716,6 +9859,9 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7743,7 +9889,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7771,7 +9917,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.Min |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7790,7 +9936,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Max |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7804,6 +9950,9 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7831,7 +9980,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift 
if b < 0x80 { break } @@ -7859,7 +10008,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7869,6 +10018,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7888,7 +10040,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7898,6 +10050,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7912,6 +10067,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7939,7 +10097,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7967,7 +10125,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7976,6 +10134,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7997,7 +10158,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8006,6 +10167,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8027,7 +10191,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8036,6 +10200,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8052,6 +10219,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8079,7 +10249,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8107,7 +10277,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8117,6 +10287,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -8136,7 +10309,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8145,6 +10318,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8161,6 +10337,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8188,7 +10367,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8216,7 +10395,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8225,6 +10404,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8246,7 +10428,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8255,6 +10437,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8272,6 +10457,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8299,7 +10487,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8327,7 +10515,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8337,6 +10525,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8356,7 +10547,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8365,6 +10556,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8381,6 +10575,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8408,7 +10605,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire 
|= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8436,7 +10633,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8445,6 +10642,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8464,6 +10664,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8491,7 +10694,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8519,7 +10722,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8528,6 +10731,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8552,7 +10758,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8561,6 +10767,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8583,7 +10792,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8592,6 +10801,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8609,6 +10821,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8636,7 +10851,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8664,7 +10879,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8673,6 +10888,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8689,6 +10907,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8716,7 +10937,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) 
<< shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8744,7 +10965,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8754,6 +10975,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8773,7 +10997,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8783,6 +11007,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8797,6 +11024,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8824,7 +11054,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8852,7 +11082,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8861,6 +11091,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8882,7 +11115,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8891,6 +11124,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8907,6 +11143,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8934,7 +11173,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8962,7 +11201,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8971,6 +11210,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8993,7 +11235,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9002,6 +11244,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9019,6 +11264,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9046,7 +11294,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9074,7 +11322,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9083,6 +11331,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9105,7 +11356,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9114,6 +11365,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9131,6 +11385,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9158,7 +11415,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9186,7 +11443,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9195,6 +11452,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9216,7 +11476,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9225,6 +11485,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9242,6 +11505,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9269,7 +11535,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9297,7 +11563,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9306,11 +11572,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.PodSelector = &v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9330,7 +11599,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9339,11 +11608,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9363,7 +11635,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9372,6 +11644,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9391,6 +11666,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9418,7 +11696,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9446,7 +11724,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9456,6 +11734,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9476,7 +11757,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9485,11 +11766,14 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.Port = &intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9504,6 +11788,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9531,7 +11818,7 @@ func (m 
*NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9559,7 +11846,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9568,6 +11855,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9589,7 +11879,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9598,6 +11888,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9620,7 +11913,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9629,6 +11922,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9651,7 +11947,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9661,6 +11957,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9675,6 +11974,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9702,7 +12004,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9730,7 +12032,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9739,6 +12041,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9760,7 +12065,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9769,6 +12074,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9785,6 +12093,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9812,7 +12123,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9840,7 +12151,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9849,6 +12160,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9870,7 +12184,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9879,6 +12193,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9896,6 +12213,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9923,7 +12243,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9951,7 +12271,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9971,7 +12291,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9981,6 +12301,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10000,7 +12323,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10010,6 +12333,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10029,7 +12355,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10039,6 +12365,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10058,7 +12387,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10068,6 
+12397,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10087,7 +12419,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10107,7 +12439,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10116,6 +12448,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10138,7 +12473,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10158,7 +12493,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10178,7 +12513,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10187,6 +12522,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10208,7 +12546,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10217,6 +12555,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10238,7 +12579,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10247,6 +12588,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10268,7 +12612,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10277,6 +12621,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10298,7 +12645,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10318,7 +12665,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-10339,7 +12686,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10360,7 +12707,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10369,6 +12716,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10391,28 +12741,127 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := 
m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 19: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10422,26 +12871,33 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 20: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10451,26 +12907,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 21: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10480,20 +12941,27 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := 
m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10504,6 +12972,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10531,7 +13002,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10559,7 +13030,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10568,6 +13039,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10589,7 +13063,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10598,6 +13072,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10619,7 +13096,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10628,6 +13105,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10644,6 +13124,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10671,7 +13154,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10699,7 +13182,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10709,6 +13192,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10728,7 +13214,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10738,6 +13224,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10757,7 +13246,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10766,6 +13255,9 @@ func (m 
*ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10787,7 +13279,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10797,6 +13289,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10816,7 +13311,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10826,6 +13321,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10840,6 +13338,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10867,7 +13368,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10895,7 +13396,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10904,6 +13405,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10925,7 +13429,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10934,6 +13438,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10951,6 +13458,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10978,7 +13488,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11006,7 +13516,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11026,7 +13536,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11035,11 +13545,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11059,7 +13572,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11068,6 +13581,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11089,7 +13605,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11103,6 +13619,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11130,7 +13649,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11158,7 +13677,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11177,7 +13696,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11196,7 +13715,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -11215,7 +13734,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11234,7 +13753,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11253,7 +13772,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11262,6 +13781,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11279,6 +13801,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11306,7 +13831,7 @@ func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11329,6 +13854,170 @@ func (m *ReplicationControllerDummy) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11341,7 +14030,7 @@ func 
(m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollbackConfig) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11356,7 +14045,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11364,17 +14053,53 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - m.Revision = 0 + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11384,11 +14109,28 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11398,6 +14140,9 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11410,7 +14155,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11425,7 +14170,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11433,15 +14178,47 @@ func (m *RollingUpdateDaemonSet) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11453,7 +14230,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11462,13 +14239,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11481,6 +14259,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11493,7 +14274,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11508,7 +14289,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11516,17 +14297,17 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal 
tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11536,28 +14317,27 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11569,7 +14349,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11578,13 +14358,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11597,6 +14378,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11609,7 +14393,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11624,7 +14408,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11632,15 +14416,15 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11652,7 +14436,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11662,16 +14446,19 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11681,22 +14468,24 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -11707,6 +14496,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11734,7 +14526,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11762,7 +14554,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11772,6 +14564,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11791,7 +14586,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11800,11 +14595,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := 
m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11819,6 +14617,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11846,7 +14647,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11874,7 +14675,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11883,6 +14684,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11904,7 +14708,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11913,6 +14717,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11934,7 +14741,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11943,6 +14750,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11959,6 +14769,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11986,7 +14799,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12014,7 +14827,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -12028,6 +14841,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12055,7 +14871,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12083,7 +14899,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -12102,7 +14918,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -12111,54 +14927,20 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12168,41 +14950,86 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return 
io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -12218,7 +15045,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12228,6 +15055,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12242,6 +15072,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12269,7 +15102,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12297,7 +15130,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12307,6 +15140,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12326,7 +15162,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -12335,6 +15171,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12352,6 +15191,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12418,10 +15260,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -12450,6 +15295,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -12468,241 +15316,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 3665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x49, - 0x56, 0xef, 0xac, 0x2a, 0xbb, 0xca, 0xcf, 0xed, 0xaf, 0xb0, 0xdb, 0x5d, 
0xdb, 0x33, 0xed, 0xea, - 0xcd, 0x91, 0x9a, 0x9e, 0xa1, 0xa7, 0x6a, 0xba, 0xe7, 0x63, 0x87, 0x69, 0xb1, 0xbb, 0x2e, 0xbb, - 0xdd, 0xed, 0xc5, 0x1f, 0x35, 0x51, 0x76, 0xb3, 0x8c, 0x98, 0x65, 0xd2, 0x55, 0xe1, 0x72, 0x8e, - 0xb3, 0x32, 0x73, 0x33, 0x22, 0xbd, 0x2e, 0x09, 0x21, 0x0e, 0x08, 0x09, 0x09, 0x04, 0x1c, 0x96, - 0x0f, 0x71, 0x61, 0x2f, 0x9c, 0x40, 0x70, 0x83, 0xc3, 0x6a, 0x24, 0xa4, 0x45, 0x1a, 0xa1, 0x45, - 0xda, 0x1b, 0x7b, 0xb2, 0x18, 0xcf, 0x09, 0xf1, 0x0f, 0xa0, 0x3e, 0x20, 0x14, 0x91, 0x91, 0xdf, - 0x99, 0xae, 0x2a, 0x4f, 0xb7, 0x85, 0xd0, 0xde, 0x2a, 0xe3, 0xbd, 0xf7, 0x7b, 0x2f, 0x22, 0x5e, - 0xbc, 0xf7, 0xe2, 0xa3, 0x60, 0xe3, 0xf8, 0x7d, 0x5a, 0xd7, 0xad, 0xc6, 0xb1, 0x7b, 0x40, 0x1c, - 0x93, 0x30, 0x42, 0x1b, 0x27, 0xc4, 0xec, 0x5a, 0x4e, 0x43, 0x12, 0x34, 0x5b, 0x6f, 0x90, 0x53, - 0x46, 0x4c, 0xaa, 0x5b, 0x26, 0x6d, 0x9c, 0x3c, 0x38, 0x20, 0x4c, 0x7b, 0xd0, 0xe8, 0x11, 0x93, - 0x38, 0x1a, 0x23, 0xdd, 0xba, 0xed, 0x58, 0xcc, 0x42, 0xb7, 0x3d, 0xf6, 0xba, 0x66, 0xeb, 0xf5, - 0x90, 0xbd, 0x2e, 0xd9, 0x6f, 0xbd, 0xd9, 0xd3, 0xd9, 0x91, 0x7b, 0x50, 0xef, 0x58, 0xfd, 0x46, - 0xcf, 0xea, 0x59, 0x0d, 0x21, 0x75, 0xe0, 0x1e, 0x8a, 0x2f, 0xf1, 0x21, 0x7e, 0x79, 0x68, 0xb7, - 0xd4, 0x88, 0xf2, 0x8e, 0xe5, 0x90, 0xc6, 0x49, 0x4a, 0xe3, 0xad, 0x77, 0x42, 0x9e, 0xbe, 0xd6, - 0x39, 0xd2, 0x4d, 0xe2, 0x0c, 0x1a, 0xf6, 0x71, 0x4f, 0x08, 0x39, 0x84, 0x5a, 0xae, 0xd3, 0x21, - 0x63, 0x49, 0xd1, 0x46, 0x9f, 0x30, 0x2d, 0x4b, 0x57, 0x23, 0x4f, 0xca, 0x71, 0x4d, 0xa6, 0xf7, - 0xd3, 0x6a, 0xde, 0x1b, 0x26, 0x40, 0x3b, 0x47, 0xa4, 0xaf, 0xa5, 0xe4, 0xde, 0xce, 0x93, 0x73, - 0x99, 0x6e, 0x34, 0x74, 0x93, 0x51, 0xe6, 0x24, 0x85, 0xd4, 0x47, 0xb0, 0xb0, 0x6a, 0x18, 0xd6, - 0x0f, 0x48, 0x77, 0xc3, 0x20, 0xa7, 0xcf, 0x2c, 0xc3, 0xed, 0x13, 0x74, 0x17, 0x26, 0xbb, 0x8e, - 0x7e, 0x42, 0x9c, 0xaa, 0x72, 0x47, 0xb9, 0x37, 0xd5, 0x9c, 0xfd, 0xfc, 0xac, 0x76, 0xed, 0xfc, - 0xac, 0x36, 0xb9, 0x2e, 0x5a, 0xb1, 0xa4, 0xaa, 0x14, 0xe6, 0xa4, 0xf0, 0x53, 0x8b, 0xb2, 0x96, - 0xc6, 0x8e, 0xd0, 0x43, 0x00, 0x5b, 0x63, 0x47, 0x2d, 0x87, 0x1c, 0xea, 0xa7, 0x52, 0x1c, 0x49, - 0x71, 0x68, 0x05, 0x14, 0x1c, 0xe1, 0x42, 0xf7, 0xa1, 0xe2, 0x10, 0xad, 0xbb, 0x6b, 0x1a, 0x83, - 0x6a, 0xe1, 0x8e, 0x72, 0xaf, 0xd2, 0x9c, 0x97, 0x12, 0x15, 0x2c, 0xdb, 0x71, 0xc0, 0xa1, 0xfe, - 0xa5, 0x02, 0x5f, 0x5b, 0x73, 0x29, 0xb3, 0xfa, 0xdb, 0x84, 0x39, 0x7a, 0x67, 0xcd, 0x75, 0x1c, - 0x62, 0xb2, 0x36, 0xd3, 0x98, 0x4b, 0xd1, 0x1d, 0x28, 0x99, 0x5a, 0x9f, 0x48, 0xcd, 0xd7, 0x25, - 0x4e, 0x69, 0x47, 0xeb, 0x13, 0x2c, 0x28, 0xe8, 0x23, 0x98, 0x38, 0xd1, 0x0c, 0x97, 0x08, 0x55, - 0xd3, 0x0f, 0xeb, 0xf5, 0xd0, 0xfb, 0x82, 0x61, 0xab, 0xdb, 0xc7, 0x3d, 0xe1, 0x8e, 0xbe, 0x2f, - 0xd4, 0x3f, 0x74, 0x35, 0x93, 0xe9, 0x6c, 0xd0, 0x5c, 0x92, 0x90, 0xd7, 0xa5, 0xde, 0x67, 0x1c, - 0x0b, 0x7b, 0x90, 0xea, 0xef, 0xc0, 0xed, 0x5c, 0xd3, 0xb6, 0x74, 0xca, 0xd0, 0xc7, 0x30, 0xa1, - 0x33, 0xd2, 0xa7, 0x55, 0xe5, 0x4e, 0xf1, 0xde, 0xf4, 0xc3, 0xf7, 0xeb, 0x17, 0xba, 0x7e, 0x3d, - 0x17, 0xac, 0x39, 0x23, 0xcd, 0x98, 0xd8, 0xe4, 0x70, 0xd8, 0x43, 0x55, 0xff, 0x54, 0x01, 0x14, - 0x95, 0xd9, 0xd3, 0x9c, 0x1e, 0x61, 0x23, 0x0c, 0xca, 0x6f, 0x7c, 0xb5, 0x41, 0x59, 0x94, 0x90, - 0xd3, 0x9e, 0xc2, 0xd8, 0x98, 0xd8, 0xb0, 0x9c, 0x36, 0x49, 0x0c, 0xc6, 0xb3, 0xf8, 0x60, 0x3c, - 0x18, 0x63, 0x30, 0x3c, 0x94, 0x9c, 0x51, 0xf8, 0x61, 0x01, 0xa6, 0xd6, 0x35, 0xd2, 0xb7, 0xcc, - 0x36, 0x61, 0xe8, 0x13, 0xa8, 0xf0, 0xa5, 0xd9, 0xd5, 0x98, 0x26, 0x06, 0x60, 0xfa, 0xe1, 0x5b, - 0x17, 0xf5, 0x8e, 0xd6, 0x39, 0x77, 0xfd, 0xe4, 0x41, 0x7d, 0xf7, 0xe0, 0x53, 0xd2, 0x61, 0xdb, - 
0x84, 0x69, 0xa1, 0x07, 0x87, 0x6d, 0x38, 0x40, 0x45, 0x3b, 0x50, 0xa2, 0x36, 0xe9, 0xc8, 0xb1, - 0xbb, 0x3f, 0xa4, 0x1b, 0x81, 0x65, 0x6d, 0x9b, 0x74, 0xc2, 0xc9, 0xe0, 0x5f, 0x58, 0xe0, 0xa0, - 0x67, 0x30, 0x49, 0xc5, 0x2c, 0x57, 0x8b, 0xa9, 0xd9, 0xb8, 0x18, 0xd1, 0xf3, 0x8d, 0x60, 0xb9, - 0x7a, 0xdf, 0x58, 0xa2, 0xa9, 0xff, 0x59, 0x00, 0x14, 0xf0, 0xae, 0x59, 0x66, 0x57, 0x67, 0xba, - 0x65, 0xa2, 0x0f, 0xa0, 0xc4, 0x06, 0xb6, 0xef, 0x1d, 0x77, 0x7d, 0x83, 0xf6, 0x06, 0x36, 0x79, - 0x7e, 0x56, 0x5b, 0x4e, 0x4b, 0x70, 0x0a, 0x16, 0x32, 0x68, 0x2b, 0x30, 0xb5, 0x20, 0xa4, 0xdf, - 0x89, 0xab, 0x7e, 0x7e, 0x56, 0xcb, 0x08, 0xc7, 0xf5, 0x00, 0x29, 0x6e, 0x20, 0x3a, 0x01, 0x64, - 0x68, 0x94, 0xed, 0x39, 0x9a, 0x49, 0x3d, 0x4d, 0x7a, 0x9f, 0xc8, 0x41, 0x78, 0x63, 0xb4, 0x49, - 0xe3, 0x12, 0xcd, 0x5b, 0xd2, 0x0a, 0xb4, 0x95, 0x42, 0xc3, 0x19, 0x1a, 0x78, 0xbc, 0x73, 0x88, - 0x46, 0x2d, 0xb3, 0x5a, 0x8a, 0xc7, 0x3b, 0x2c, 0x5a, 0xb1, 0xa4, 0xa2, 0xd7, 0xa1, 0xdc, 0x27, - 0x94, 0x6a, 0x3d, 0x52, 0x9d, 0x10, 0x8c, 0x73, 0x92, 0xb1, 0xbc, 0xed, 0x35, 0x63, 0x9f, 0xae, - 0xfe, 0x58, 0x81, 0x99, 0x60, 0xe4, 0x84, 0xb7, 0xff, 0x66, 0xca, 0x0f, 0xeb, 0xa3, 0x75, 0x89, - 0x4b, 0x0b, 0x2f, 0x0c, 0xa2, 0xa2, 0xdf, 0x12, 0xf1, 0xc1, 0x6d, 0x7f, 0x2d, 0x15, 0xc4, 0x5a, - 0xba, 0x37, 0xaa, 0xcb, 0xe4, 0x2c, 0xa1, 0x3f, 0x2b, 0x45, 0xcc, 0xe7, 0xae, 0x89, 0x3e, 0x86, - 0x0a, 0x25, 0x06, 0xe9, 0x30, 0xcb, 0x91, 0xe6, 0xbf, 0x3d, 0xa2, 0xf9, 0xda, 0x01, 0x31, 0xda, - 0x52, 0xb4, 0x79, 0x9d, 0xdb, 0xef, 0x7f, 0xe1, 0x00, 0x12, 0x7d, 0x08, 0x15, 0x46, 0xfa, 0xb6, - 0xa1, 0x31, 0x3f, 0x06, 0xbd, 0x16, 0xed, 0x02, 0xf7, 0x1c, 0x0e, 0xd6, 0xb2, 0xba, 0x7b, 0x92, - 0x4d, 0x2c, 0x9f, 0x60, 0x48, 0xfc, 0x56, 0x1c, 0xc0, 0xa0, 0x13, 0x98, 0x75, 0xed, 0x2e, 0xe7, - 0x64, 0x3c, 0xe3, 0xf5, 0x06, 0xd2, 0x93, 0xde, 0x1b, 0x75, 0x6c, 0xf6, 0x63, 0xd2, 0xcd, 0x65, - 0xa9, 0x6b, 0x36, 0xde, 0x8e, 0x13, 0x5a, 0xd0, 0x2a, 0xcc, 0xf5, 0x75, 0x93, 0x67, 0xae, 0x41, - 0x9b, 0x74, 0x2c, 0xb3, 0x4b, 0x85, 0x5b, 0x4d, 0x34, 0x6f, 0x4a, 0x80, 0xb9, 0xed, 0x38, 0x19, - 0x27, 0xf9, 0xd1, 0x77, 0x00, 0xf9, 0xdd, 0x78, 0xe2, 0x25, 0x6c, 0xdd, 0x32, 0x85, 0xcf, 0x15, - 0x43, 0xe7, 0xde, 0x4b, 0x71, 0xe0, 0x0c, 0x29, 0xb4, 0x05, 0x4b, 0x0e, 0x39, 0xd1, 0x79, 0x1f, - 0x9f, 0xea, 0x94, 0x59, 0xce, 0x60, 0x4b, 0xef, 0xeb, 0xac, 0x3a, 0x29, 0x6c, 0xaa, 0x9e, 0x9f, - 0xd5, 0x96, 0x70, 0x06, 0x1d, 0x67, 0x4a, 0xa9, 0x7f, 0x3e, 0x09, 0x73, 0x89, 0x78, 0x83, 0x9e, - 0xc1, 0x72, 0xc7, 0x4b, 0x4e, 0x3b, 0x6e, 0xff, 0x80, 0x38, 0xed, 0xce, 0x11, 0xe9, 0xba, 0x06, - 0xe9, 0x0a, 0x47, 0x99, 0x68, 0xae, 0x48, 0x8b, 0x97, 0xd7, 0x32, 0xb9, 0x70, 0x8e, 0x34, 0x1f, - 0x05, 0x53, 0x34, 0x6d, 0xeb, 0x94, 0x06, 0x98, 0x05, 0x81, 0x19, 0x8c, 0xc2, 0x4e, 0x8a, 0x03, - 0x67, 0x48, 0x71, 0x1b, 0xbb, 0x84, 0xea, 0x0e, 0xe9, 0x26, 0x6d, 0x2c, 0xc6, 0x6d, 0x5c, 0xcf, - 0xe4, 0xc2, 0x39, 0xd2, 0xe8, 0x5d, 0x98, 0xf6, 0xb4, 0x89, 0xf9, 0x93, 0x13, 0x1d, 0xa4, 0xc3, - 0x9d, 0x90, 0x84, 0xa3, 0x7c, 0xbc, 0x6b, 0xd6, 0x01, 0x25, 0xce, 0x09, 0xe9, 0xe6, 0x4f, 0xf0, - 0x6e, 0x8a, 0x03, 0x67, 0x48, 0xf1, 0xae, 0x79, 0x1e, 0x98, 0xea, 0xda, 0x64, 0xbc, 0x6b, 0xfb, - 0x99, 0x5c, 0x38, 0x47, 0x9a, 0xfb, 0xb1, 0x67, 0xf2, 0xea, 0x89, 0xa6, 0x1b, 0xda, 0x81, 0x41, - 0xaa, 0xe5, 0xb8, 0x1f, 0xef, 0xc4, 0xc9, 0x38, 0xc9, 0x8f, 0x9e, 0xc0, 0x82, 0xd7, 0xb4, 0x6f, - 0x6a, 0x01, 0x48, 0x45, 0x80, 0x7c, 0x4d, 0x82, 0x2c, 0xec, 0x24, 0x19, 0x70, 0x5a, 0x06, 0x7d, - 0x00, 0xb3, 0x1d, 0xcb, 0x30, 0x84, 0x3f, 0xae, 0x59, 0xae, 0xc9, 0xaa, 0x53, 0x02, 0x05, 0xf1, - 0xf5, 0xb8, 0x16, 0xa3, 
0xe0, 0x04, 0x27, 0x22, 0x00, 0x1d, 0x3f, 0xe1, 0xd0, 0x2a, 0x8c, 0x54, - 0x6b, 0xa4, 0x93, 0x5e, 0x58, 0x03, 0x04, 0x4d, 0x14, 0x47, 0x80, 0xd5, 0x7f, 0x55, 0xe0, 0x66, - 0x4e, 0xe8, 0x40, 0xdf, 0x8a, 0xa5, 0xd8, 0x5f, 0x4e, 0xa4, 0xd8, 0x57, 0x72, 0xc4, 0x22, 0x79, - 0xd6, 0x84, 0x19, 0x87, 0xf7, 0xca, 0xec, 0x79, 0x2c, 0x32, 0x46, 0xbe, 0x3b, 0xa4, 0x1b, 0x38, - 0x2a, 0x13, 0xc6, 0xfc, 0x85, 0xf3, 0xb3, 0xda, 0x4c, 0x8c, 0x86, 0xe3, 0xf0, 0xea, 0x5f, 0x14, - 0x00, 0xd6, 0x89, 0x6d, 0x58, 0x83, 0x3e, 0x31, 0xaf, 0xa2, 0x86, 0xda, 0x8d, 0xd5, 0x50, 0x6f, - 0x0e, 0x9b, 0x9e, 0xc0, 0xb4, 0xdc, 0x22, 0xea, 0xd7, 0x13, 0x45, 0x54, 0x63, 0x74, 0xc8, 0x8b, - 0xab, 0xa8, 0x7f, 0x2f, 0xc2, 0x62, 0xc8, 0x1c, 0x96, 0x51, 0x8f, 0x62, 0x73, 0xfc, 0x4b, 0x89, - 0x39, 0xbe, 0x99, 0x21, 0xf2, 0xd2, 0xea, 0xa8, 0x17, 0x5f, 0xcf, 0xa0, 0x4f, 0x61, 0x96, 0x17, - 0x4e, 0x9e, 0x7b, 0x88, 0xb2, 0x6c, 0x72, 0xec, 0xb2, 0x2c, 0x48, 0xa0, 0x5b, 0x31, 0x24, 0x9c, - 0x40, 0xce, 0x29, 0x03, 0xcb, 0x2f, 0xbb, 0x0c, 0x54, 0x3f, 0x53, 0x60, 0x36, 0x9c, 0xa6, 0x2b, - 0x28, 0xda, 0x76, 0xe2, 0x45, 0xdb, 0xeb, 0x23, 0xbb, 0x68, 0x4e, 0xd5, 0xf6, 0xdf, 0xbc, 0xc0, - 0x0f, 0x98, 0xf8, 0x02, 0x3f, 0xd0, 0x3a, 0xc7, 0x23, 0x6c, 0xff, 0x7e, 0xa8, 0x00, 0x92, 0x59, - 0x60, 0xd5, 0x34, 0x2d, 0xa6, 0x79, 0xb1, 0xd2, 0x33, 0x6b, 0x73, 0x64, 0xb3, 0x7c, 0x8d, 0xf5, - 0xfd, 0x14, 0xd6, 0x63, 0x93, 0x39, 0x83, 0x70, 0x46, 0xd2, 0x0c, 0x38, 0xc3, 0x00, 0xa4, 0x01, - 0x38, 0x12, 0x73, 0xcf, 0x92, 0x0b, 0xf9, 0xcd, 0x11, 0x62, 0x1e, 0x17, 0x58, 0xb3, 0xcc, 0x43, - 0xbd, 0x17, 0x86, 0x1d, 0x1c, 0x00, 0xe1, 0x08, 0xe8, 0xad, 0xc7, 0x70, 0x33, 0xc7, 0x5a, 0x34, - 0x0f, 0xc5, 0x63, 0x32, 0xf0, 0x86, 0x0d, 0xf3, 0x9f, 0x68, 0x29, 0xba, 0x4d, 0x9e, 0x92, 0x3b, - 0xdc, 0x0f, 0x0a, 0xef, 0x2b, 0xea, 0x8f, 0x27, 0xa2, 0xbe, 0x23, 0x2a, 0xe6, 0x7b, 0x50, 0x71, - 0x88, 0x6d, 0xe8, 0x1d, 0x8d, 0xca, 0x42, 0xe8, 0xba, 0x77, 0xa4, 0xe1, 0xb5, 0xe1, 0x80, 0x1a, - 0xab, 0xad, 0x0b, 0x2f, 0xb7, 0xb6, 0x2e, 0xbe, 0x98, 0xda, 0xfa, 0xb7, 0xa0, 0x42, 0xfd, 0xaa, - 0xba, 0x24, 0x20, 0x1f, 0x8c, 0x11, 0x5f, 0x65, 0x41, 0x1d, 0x28, 0x08, 0x4a, 0xe9, 0x00, 0x34, - 0xab, 0x88, 0x9e, 0x18, 0xb3, 0x88, 0x7e, 0xa1, 0x85, 0x2f, 0x8f, 0xa9, 0xb6, 0xe6, 0x52, 0xd2, - 0x15, 0x81, 0xa8, 0x12, 0xc6, 0xd4, 0x96, 0x68, 0xc5, 0x92, 0x8a, 0x3e, 0x8e, 0xb9, 0x6c, 0xe5, - 0x32, 0x2e, 0x3b, 0x9b, 0xef, 0xae, 0x68, 0x1f, 0x6e, 0xda, 0x8e, 0xd5, 0x73, 0x08, 0xa5, 0xeb, - 0x44, 0xeb, 0x1a, 0xba, 0x49, 0xfc, 0xf1, 0xf1, 0x2a, 0xa2, 0x57, 0xce, 0xcf, 0x6a, 0x37, 0x5b, - 0xd9, 0x2c, 0x38, 0x4f, 0x56, 0xfd, 0xbc, 0x04, 0xf3, 0xc9, 0x0c, 0x98, 0x53, 0xa4, 0x2a, 0x97, - 0x2a, 0x52, 0xef, 0x47, 0x16, 0x83, 0x57, 0xc1, 0x47, 0xce, 0xf8, 0x52, 0x0b, 0x62, 0x15, 0xe6, - 0x64, 0x34, 0xf0, 0x89, 0xb2, 0x4c, 0x0f, 0x66, 0x7f, 0x3f, 0x4e, 0xc6, 0x49, 0x7e, 0x5e, 0x7a, - 0x86, 0x15, 0xa5, 0x0f, 0x52, 0x8a, 0x97, 0x9e, 0xab, 0x49, 0x06, 0x9c, 0x96, 0x41, 0xdb, 0xb0, - 0xe8, 0x9a, 0x69, 0x28, 0xcf, 0x1b, 0x5f, 0x91, 0x50, 0x8b, 0xfb, 0x69, 0x16, 0x9c, 0x25, 0x87, - 0x0e, 0x63, 0xd5, 0xe8, 0xa4, 0x88, 0xb0, 0x0f, 0x47, 0x5e, 0x3b, 0x23, 0x97, 0xa3, 0xe8, 0x11, - 0xcc, 0x38, 0x62, 0xdf, 0xe1, 0x1b, 0xec, 0xd5, 0xee, 0x37, 0xa4, 0xd8, 0x0c, 0x8e, 0x12, 0x71, - 0x9c, 0x37, 0xa3, 0xdc, 0xae, 0x8c, 0x5a, 0x6e, 0xab, 0xff, 0xac, 0x44, 0x93, 0x50, 0x50, 0x02, - 0x0f, 0x3b, 0x65, 0x4a, 0x49, 0x44, 0xaa, 0x23, 0x2b, 0xbb, 0xfa, 0x7d, 0x6f, 0xac, 0xea, 0x37, - 0x4c, 0x9e, 0xc3, 0xcb, 0xdf, 0x1f, 0x29, 0xb0, 0xbc, 0xd1, 0x7e, 0xe2, 0x58, 0xae, 0xed, 0x9b, - 0xb3, 0x6b, 0x7b, 0xe3, 0xfa, 0x0d, 0x28, 0x39, 
0xae, 0xe1, 0xf7, 0xe3, 0x35, 0xbf, 0x1f, 0xd8, - 0x35, 0x78, 0x3f, 0x16, 0x13, 0x52, 0x5e, 0x27, 0xb8, 0x00, 0xda, 0x81, 0x49, 0x47, 0x33, 0x7b, - 0xc4, 0x4f, 0xab, 0x77, 0x87, 0x58, 0xbf, 0xb9, 0x8e, 0x39, 0x7b, 0xa4, 0x78, 0x13, 0xd2, 0x58, - 0xa2, 0xa8, 0x7f, 0xa4, 0xc0, 0xdc, 0xd3, 0xbd, 0xbd, 0xd6, 0xa6, 0x29, 0x56, 0xb4, 0x38, 0x7d, - 0xbf, 0x03, 0x25, 0x5b, 0x63, 0x47, 0xc9, 0x4c, 0xcf, 0x69, 0x58, 0x50, 0xd0, 0x77, 0xa1, 0xcc, - 0x23, 0x09, 0x31, 0xbb, 0x23, 0x96, 0xda, 0x12, 0xbe, 0xe9, 0x09, 0x85, 0x15, 0xa2, 0x6c, 0xc0, - 0x3e, 0x9c, 0x7a, 0x0c, 0x4b, 0x11, 0x73, 0xf8, 0x78, 0x88, 0x63, 0x60, 0xd4, 0x86, 0x09, 0xae, - 0xd9, 0x3f, 0xe5, 0x1d, 0x76, 0x98, 0x99, 0xe8, 0x52, 0x58, 0xe9, 0xf0, 0x2f, 0x8a, 0x3d, 0x2c, - 0x75, 0x1b, 0x66, 0xc4, 0x95, 0x83, 0xe5, 0x30, 0x31, 0x2c, 0xe8, 0x36, 0x14, 0xfb, 0xba, 0x29, - 0xf3, 0xec, 0xb4, 0x94, 0x29, 0xf2, 0x1c, 0xc1, 0xdb, 0x05, 0x59, 0x3b, 0x95, 0x91, 0x27, 0x24, - 0x6b, 0xa7, 0x98, 0xb7, 0xab, 0x4f, 0xa0, 0x2c, 0x87, 0x3b, 0x0a, 0x54, 0xbc, 0x18, 0xa8, 0x98, - 0x01, 0xb4, 0x0b, 0xe5, 0xcd, 0x56, 0xd3, 0xb0, 0xbc, 0xaa, 0xab, 0xa3, 0x77, 0x9d, 0xe4, 0x5c, - 0xac, 0x6d, 0xae, 0x63, 0x2c, 0x28, 0x48, 0x85, 0x49, 0x72, 0xda, 0x21, 0x36, 0x13, 0x1e, 0x31, - 0xd5, 0x04, 0x3e, 0xcb, 0x8f, 0x45, 0x0b, 0x96, 0x14, 0xf5, 0x8f, 0x0b, 0x50, 0x96, 0xc3, 0x71, - 0x05, 0xbb, 0xb0, 0xad, 0xd8, 0x2e, 0xec, 0x8d, 0xd1, 0x5c, 0x23, 0x77, 0x0b, 0xb6, 0x97, 0xd8, - 0x82, 0xdd, 0x1f, 0x11, 0xef, 0xe2, 0xfd, 0xd7, 0x3f, 0x28, 0x30, 0x1b, 0x77, 0x4a, 0xf4, 0x2e, - 0x4c, 0xf3, 0x84, 0xa3, 0x77, 0xc8, 0x4e, 0x58, 0xe7, 0x06, 0x87, 0x30, 0xed, 0x90, 0x84, 0xa3, - 0x7c, 0xa8, 0x17, 0x88, 0x71, 0x3f, 0x92, 0x9d, 0xce, 0x1f, 0x52, 0x97, 0xe9, 0x46, 0xdd, 0xbb, - 0x46, 0xab, 0x6f, 0x9a, 0x6c, 0xd7, 0x69, 0x33, 0x47, 0x37, 0x7b, 0x29, 0x45, 0xc2, 0x29, 0xa3, - 0xc8, 0xea, 0x3f, 0x29, 0x30, 0x2d, 0x4d, 0xbe, 0x82, 0x5d, 0xc5, 0xaf, 0xc5, 0x77, 0x15, 0x77, - 0x47, 0x5c, 0xe0, 0xd9, 0x5b, 0x8a, 0xbf, 0x09, 0x4d, 0xe7, 0x4b, 0x9a, 0x7b, 0xf5, 0x91, 0x45, - 0x59, 0xd2, 0xab, 0xf9, 0x62, 0xc4, 0x82, 0x82, 0x5c, 0x98, 0xd7, 0x13, 0x31, 0x40, 0x0e, 0x6d, - 0x63, 0x34, 0x4b, 0x02, 0xb1, 0x66, 0x55, 0xc2, 0xcf, 0x27, 0x29, 0x38, 0xa5, 0x42, 0x25, 0x90, - 0xe2, 0x42, 0x1f, 0x42, 0xe9, 0x88, 0x31, 0x3b, 0xe3, 0xbc, 0x7a, 0x48, 0xe4, 0x09, 0x4d, 0xa8, - 0x88, 0xde, 0xed, 0xed, 0xb5, 0xb0, 0x80, 0x52, 0xff, 0x27, 0x1c, 0x8f, 0xb6, 0xe7, 0xe3, 0x41, - 0x3c, 0x55, 0x2e, 0x13, 0x4f, 0xa7, 0xb3, 0x62, 0x29, 0x7a, 0x0a, 0x45, 0x66, 0x8c, 0xba, 0x2d, - 0x94, 0x88, 0x7b, 0x5b, 0xed, 0x30, 0x20, 0xed, 0x6d, 0xb5, 0x31, 0x87, 0x40, 0xbb, 0x30, 0xc1, - 0xb3, 0x0f, 0x5f, 0x82, 0xc5, 0xd1, 0x97, 0x34, 0xef, 0x7f, 0xe8, 0x10, 0xfc, 0x8b, 0x62, 0x0f, - 0x47, 0xfd, 0x3e, 0xcc, 0xc4, 0xd6, 0x29, 0xfa, 0x04, 0xae, 0x1b, 0x96, 0xd6, 0x6d, 0x6a, 0x86, - 0x66, 0x76, 0x88, 0x7f, 0x39, 0x70, 0x37, 0x6b, 0x87, 0xb1, 0x15, 0xe1, 0x93, 0xab, 0x3c, 0xb8, - 0x4e, 0x8d, 0xd2, 0x70, 0x0c, 0x51, 0xd5, 0x00, 0xc2, 0x3e, 0xa2, 0x1a, 0x4c, 0x70, 0x3f, 0xf3, - 0xf2, 0xc9, 0x54, 0x73, 0x8a, 0x5b, 0xc8, 0xdd, 0x8f, 0x62, 0xaf, 0x1d, 0x3d, 0x04, 0xa0, 0xa4, - 0xe3, 0x10, 0x26, 0x82, 0x41, 0x21, 0x7e, 0x05, 0xdd, 0x0e, 0x28, 0x38, 0xc2, 0xa5, 0xfe, 0x8b, - 0x02, 0x33, 0x3b, 0x84, 0xfd, 0xc0, 0x72, 0x8e, 0x5b, 0x96, 0xa1, 0x77, 0x06, 0x57, 0x10, 0x6c, - 0x71, 0x2c, 0xd8, 0xbe, 0x35, 0x64, 0x66, 0x62, 0xd6, 0xe5, 0x85, 0x5c, 0xf5, 0x33, 0x05, 0x6e, - 0xc6, 0x38, 0x1f, 0x87, 0x4b, 0x77, 0x1f, 0x26, 0x6c, 0xcb, 0x61, 0x7e, 0x22, 0x1e, 0x4b, 0x21, - 0x0f, 0x63, 0x91, 0x54, 0xcc, 0x61, 0xb0, 0x87, 0x86, 0xb6, 0xa0, 0xc0, 
0x2c, 0xe9, 0xaa, 0xe3, - 0x61, 0x12, 0xe2, 0x34, 0x41, 0x62, 0x16, 0xf6, 0x2c, 0x5c, 0x60, 0x16, 0x9f, 0x88, 0x6a, 0x8c, - 0x2b, 0x1a, 0x7c, 0x5e, 0x52, 0x0f, 0x30, 0x94, 0x0e, 0x1d, 0xab, 0x7f, 0xe9, 0x3e, 0x04, 0x13, - 0xb1, 0xe1, 0x58, 0x7d, 0x2c, 0xb0, 0xd4, 0x9f, 0x28, 0xb0, 0x10, 0xe3, 0xbc, 0x82, 0xc0, 0xff, - 0x61, 0x3c, 0xf0, 0xdf, 0x1f, 0xa7, 0x23, 0x39, 0xe1, 0xff, 0x27, 0x85, 0x44, 0x37, 0x78, 0x87, - 0xd1, 0x21, 0x4c, 0xdb, 0x56, 0xb7, 0xfd, 0x02, 0xae, 0x03, 0xe7, 0x78, 0xde, 0x6c, 0x85, 0x58, - 0x38, 0x0a, 0x8c, 0x4e, 0x61, 0xc1, 0xd4, 0xfa, 0x84, 0xda, 0x5a, 0x87, 0xb4, 0x5f, 0xc0, 0x01, - 0xc9, 0x0d, 0x71, 0xdf, 0x90, 0x44, 0xc4, 0x69, 0x25, 0x68, 0x1b, 0xca, 0xba, 0x2d, 0xea, 0x38, - 0x59, 0xbb, 0x0c, 0xcd, 0xa2, 0x5e, 0xd5, 0xe7, 0xc5, 0x73, 0xf9, 0x81, 0x7d, 0x0c, 0xf5, 0x6f, - 0x93, 0xde, 0xc0, 0xfd, 0x0f, 0x3d, 0x81, 0x8a, 0x78, 0x84, 0xd3, 0xb1, 0x0c, 0xff, 0x66, 0x80, - 0xcf, 0x6c, 0x4b, 0xb6, 0x3d, 0x3f, 0xab, 0xbd, 0x92, 0x71, 0xe8, 0xeb, 0x93, 0x71, 0x20, 0x8c, - 0x76, 0xa0, 0x64, 0x7f, 0x95, 0x0a, 0x46, 0x24, 0x39, 0x51, 0xb6, 0x08, 0x1c, 0xf5, 0xf7, 0x8a, - 0x09, 0x73, 0x45, 0xaa, 0xfb, 0xf4, 0x85, 0xcd, 0x7a, 0x50, 0x31, 0xe5, 0xce, 0xfc, 0x01, 0x94, - 0x65, 0x86, 0x97, 0xce, 0xfc, 0x8d, 0x71, 0x9c, 0x39, 0x9a, 0xc5, 0x82, 0x0d, 0x8b, 0xdf, 0xe8, - 0x03, 0xa3, 0xef, 0xc1, 0x24, 0xf1, 0x54, 0x78, 0xb9, 0xf1, 0xbd, 0x71, 0x54, 0x84, 0x71, 0x35, - 0x2c, 0x54, 0x65, 0x9b, 0x44, 0x45, 0xdf, 0xe2, 0xe3, 0xc5, 0x79, 0xf9, 0x26, 0x90, 0x56, 0x4b, - 0x22, 0x5d, 0xdd, 0xf6, 0xba, 0x1d, 0x34, 0x3f, 0x3f, 0xab, 0x41, 0xf8, 0x89, 0xa3, 0x12, 0xea, - 0xbf, 0x29, 0xb0, 0x20, 0x46, 0xa8, 0xe3, 0x3a, 0x3a, 0x1b, 0x5c, 0x59, 0x62, 0x7a, 0x16, 0x4b, - 0x4c, 0xef, 0x0c, 0x19, 0x96, 0x94, 0x85, 0xb9, 0xc9, 0xe9, 0xa7, 0x0a, 0xdc, 0x48, 0x71, 0x5f, - 0x41, 0x5c, 0xdc, 0x8f, 0xc7, 0xc5, 0xb7, 0xc6, 0xed, 0x50, 0x4e, 0x6c, 0xfc, 0xab, 0xb9, 0x8c, - 0xee, 0x88, 0x95, 0xf2, 0x10, 0xc0, 0x76, 0xf4, 0x13, 0xdd, 0x20, 0x3d, 0x79, 0x09, 0x5e, 0x89, - 0x3c, 0x82, 0x0b, 0x28, 0x38, 0xc2, 0x85, 0x28, 0x2c, 0x77, 0xc9, 0xa1, 0xe6, 0x1a, 0x6c, 0xb5, - 0xdb, 0x5d, 0xd3, 0x6c, 0xed, 0x40, 0x37, 0x74, 0xa6, 0xcb, 0xe3, 0x82, 0xa9, 0xe6, 0x23, 0xef, - 0x72, 0x3a, 0x8b, 0xe3, 0xf9, 0x59, 0xed, 0x76, 0xd6, 0xed, 0x90, 0xcf, 0x32, 0xc0, 0x39, 0xd0, - 0x68, 0x00, 0x55, 0x87, 0x7c, 0xdf, 0xd5, 0x1d, 0xd2, 0x5d, 0x77, 0x2c, 0x3b, 0xa6, 0xb6, 0x28, - 0xd4, 0xfe, 0xea, 0xf9, 0x59, 0xad, 0x8a, 0x73, 0x78, 0x86, 0x2b, 0xce, 0x85, 0x47, 0x9f, 0xc2, - 0xa2, 0xe6, 0xbd, 0x1d, 0x8c, 0x69, 0xf5, 0x56, 0xc9, 0xfb, 0xe7, 0x67, 0xb5, 0xc5, 0xd5, 0x34, - 0x79, 0xb8, 0xc2, 0x2c, 0x50, 0xd4, 0x80, 0xf2, 0x89, 0x78, 0xd9, 0x48, 0xab, 0x13, 0x02, 0x9f, - 0x27, 0x82, 0xb2, 0xf7, 0xd8, 0x91, 0x63, 0x4e, 0x6e, 0xb4, 0xc5, 0xea, 0xf3, 0xb9, 0xf8, 0x86, - 0x92, 0xd7, 0x92, 0x72, 0xc5, 0x8b, 0x13, 0xe3, 0x4a, 0x18, 0xb5, 0x9e, 0x86, 0x24, 0x1c, 0xe5, - 0x43, 0x1f, 0xc3, 0xd4, 0x91, 0x3c, 0x95, 0xa0, 0xd5, 0xf2, 0x48, 0x49, 0x38, 0x76, 0x8a, 0xd1, - 0x5c, 0x90, 0x2a, 0xa6, 0xfc, 0x66, 0x8a, 0x43, 0x44, 0xf4, 0x3a, 0x94, 0xc5, 0xc7, 0xe6, 0xba, - 0x38, 0x8e, 0xab, 0x84, 0xb1, 0xed, 0xa9, 0xd7, 0x8c, 0x7d, 0xba, 0xcf, 0xba, 0xd9, 0x5a, 0x13, - 0xc7, 0xc2, 0x09, 0xd6, 0xcd, 0xd6, 0x1a, 0xf6, 0xe9, 0xe8, 0x13, 0x28, 0x53, 0xb2, 0xa5, 0x9b, - 0xee, 0x69, 0x15, 0x46, 0xba, 0x54, 0x6e, 0x3f, 0x16, 0xdc, 0x89, 0x83, 0xb1, 0x50, 0x83, 0xa4, - 0x63, 0x1f, 0x16, 0x1d, 0xc1, 0x94, 0xe3, 0x9a, 0xab, 0x74, 0x9f, 0x12, 0xa7, 0x3a, 0x2d, 0x74, - 0x0c, 0x0b, 0xe7, 0xd8, 0xe7, 0x4f, 0x6a, 0x09, 0x46, 0x28, 0xe0, 0xc0, 0x21, 0x38, 0xfa, 0x43, - 
0x05, 0x10, 0x75, 0x6d, 0xdb, 0x20, 0x7d, 0x62, 0x32, 0xcd, 0x10, 0x67, 0x71, 0xb4, 0x7a, 0x5d, - 0xe8, 0xfc, 0xf6, 0xb0, 0x7e, 0xa5, 0x04, 0x93, 0xca, 0x83, 0x43, 0xef, 0x34, 0x2b, 0xce, 0xd0, - 0xcb, 0x87, 0xf6, 0x90, 0x8a, 0xdf, 0xd5, 0x99, 0x91, 0x86, 0x36, 0xfb, 0xcc, 0x31, 0x1c, 0x5a, - 0x49, 0xc7, 0x3e, 0x2c, 0x7a, 0x06, 0xcb, 0xfe, 0xc3, 0x58, 0x6c, 0x59, 0x6c, 0x43, 0x37, 0x08, - 0x1d, 0x50, 0x46, 0xfa, 0xd5, 0x59, 0x31, 0xed, 0xc1, 0xdb, 0x0f, 0x9c, 0xc9, 0x85, 0x73, 0xa4, - 0x51, 0x1f, 0x6a, 0x7e, 0xc8, 0xe0, 0xeb, 0x29, 0x88, 0x59, 0x8f, 0x69, 0x47, 0x33, 0xbc, 0x7b, - 0x80, 0x39, 0xa1, 0xe0, 0xb5, 0xf3, 0xb3, 0x5a, 0x6d, 0xfd, 0x62, 0x56, 0x3c, 0x0c, 0x0b, 0x7d, - 0x17, 0xaa, 0x5a, 0x9e, 0x9e, 0x79, 0xa1, 0xe7, 0x55, 0x1e, 0x87, 0x72, 0x15, 0xe4, 0x4a, 0x23, - 0x06, 0xf3, 0x5a, 0xfc, 0x89, 0x32, 0xad, 0x2e, 0x8c, 0x74, 0x10, 0x99, 0x78, 0xd9, 0x1c, 0x1e, - 0x46, 0x24, 0x08, 0x14, 0xa7, 0x34, 0xa0, 0xdf, 0x06, 0xa4, 0x25, 0x5f, 0x55, 0xd3, 0x2a, 0x1a, - 0x29, 0xfd, 0xa4, 0x9e, 0x63, 0x87, 0x6e, 0x97, 0x22, 0x51, 0x9c, 0xa1, 0x07, 0x6d, 0xc1, 0x92, - 0x6c, 0xdd, 0x37, 0xa9, 0x76, 0x48, 0xda, 0x03, 0xda, 0x61, 0x06, 0xad, 0x2e, 0x8a, 0xd8, 0x27, - 0x2e, 0xbe, 0x56, 0x33, 0xe8, 0x38, 0x53, 0x0a, 0x7d, 0x1b, 0xe6, 0x0f, 0x2d, 0xe7, 0x40, 0xef, - 0x76, 0x89, 0xe9, 0x23, 0x2d, 0x09, 0xa4, 0x25, 0x3e, 0x1a, 0x1b, 0x09, 0x1a, 0x4e, 0x71, 0x23, - 0x0a, 0x37, 0x24, 0x72, 0xcb, 0xb1, 0x3a, 0xdb, 0x96, 0x6b, 0x32, 0xaf, 0x24, 0xba, 0x11, 0xa4, - 0x98, 0x1b, 0xab, 0x59, 0x0c, 0xcf, 0xcf, 0x6a, 0x77, 0xb2, 0x2b, 0xe0, 0x90, 0x09, 0x67, 0x63, - 0x8b, 0x17, 0x2c, 0xf2, 0x3e, 0xe3, 0x6a, 0x5e, 0x01, 0x8f, 0xf7, 0x82, 0x25, 0x34, 0xed, 0x85, - 0xbd, 0x60, 0x89, 0x40, 0x5e, 0x7c, 0x82, 0xfa, 0x5f, 0x05, 0x58, 0x0c, 0x99, 0x47, 0x7e, 0xc1, - 0x92, 0x21, 0xf2, 0x8b, 0x97, 0xc0, 0xc3, 0x5f, 0x02, 0x7f, 0xa6, 0xc0, 0x6c, 0x38, 0x74, 0xff, - 0xf7, 0x5e, 0x95, 0x84, 0xb6, 0xe5, 0xd4, 0xb9, 0x7f, 0x5f, 0x88, 0x76, 0xe0, 0xff, 0xfd, 0xd3, - 0x86, 0xaf, 0xfe, 0x7c, 0x57, 0xfd, 0x69, 0x11, 0xe6, 0x93, 0xab, 0x31, 0x76, 0x03, 0xae, 0x0c, - 0xbd, 0x01, 0x6f, 0xc1, 0xd2, 0xa1, 0x6b, 0x18, 0x03, 0x31, 0x0c, 0x91, 0x6b, 0x70, 0xef, 0x06, - 0xeb, 0x55, 0x29, 0xb9, 0xb4, 0x91, 0xc1, 0x83, 0x33, 0x25, 0x73, 0x6e, 0xf3, 0x8b, 0x97, 0xba, - 0xcd, 0x4f, 0x5d, 0x2e, 0x97, 0xc6, 0xb8, 0x5c, 0xce, 0xbc, 0x99, 0x9f, 0xb8, 0xc4, 0xcd, 0xfc, - 0x65, 0xae, 0xd2, 0x33, 0x82, 0xd8, 0xd0, 0x97, 0x9d, 0xaf, 0xc2, 0x2d, 0x29, 0xc6, 0xc4, 0x2d, - 0xb7, 0xc9, 0x1c, 0xcb, 0x30, 0x88, 0xb3, 0xee, 0xf6, 0xfb, 0x03, 0xf5, 0x9b, 0x30, 0x1b, 0x7f, - 0xbf, 0xe1, 0xcd, 0xb4, 0xf7, 0x84, 0x44, 0xde, 0x23, 0x46, 0x66, 0xda, 0x6b, 0xc7, 0x01, 0x87, - 0xfa, 0xfb, 0x0a, 0x2c, 0x67, 0xbf, 0xd3, 0x44, 0x06, 0xcc, 0xf6, 0xb5, 0xd3, 0xe8, 0xdb, 0x59, - 0xe5, 0x92, 0x27, 0x3c, 0xe2, 0xe2, 0x7e, 0x3b, 0x86, 0x85, 0x13, 0xd8, 0xea, 0x97, 0x0a, 0xdc, - 0xcc, 0xb9, 0x32, 0xbf, 0x5a, 0x4b, 0xd0, 0x47, 0x50, 0xe9, 0x6b, 0xa7, 0x6d, 0xd7, 0xe9, 0x91, - 0x4b, 0x9f, 0x69, 0x89, 0x88, 0xb1, 0x2d, 0x51, 0x70, 0x80, 0xa7, 0xfe, 0x48, 0x81, 0x6a, 0xde, - 0xee, 0x02, 0xbd, 0x1b, 0xbb, 0xdc, 0xff, 0x7a, 0xe2, 0x72, 0x7f, 0x21, 0x25, 0xf7, 0x92, 0xae, - 0xf6, 0xff, 0x4e, 0x81, 0xe5, 0xec, 0x5d, 0x16, 0x7a, 0x3b, 0x66, 0x61, 0x2d, 0x61, 0xe1, 0x5c, - 0x42, 0x4a, 0xda, 0xf7, 0x3d, 0x98, 0x95, 0x7b, 0x31, 0x09, 0x23, 0x47, 0x55, 0xcd, 0x8a, 0x95, - 0x12, 0xc2, 0xdf, 0x7b, 0x88, 0xf9, 0x8a, 0xb7, 0xe1, 0x04, 0x9a, 0xfa, 0x07, 0x05, 0x98, 0x68, - 0x77, 0x34, 0x83, 0x5c, 0x41, 0x99, 0xf5, 0x9d, 0x58, 0x99, 0x35, 0xec, 0x7f, 0x2e, 0xc2, 0xaa, - 0xdc, 0x0a, 0x0b, 0x27, 
0x2a, 0xac, 0x37, 0x46, 0x42, 0xbb, 0xb8, 0xb8, 0xfa, 0x15, 0x98, 0x0a, - 0x94, 0x8e, 0x17, 0xf3, 0xd5, 0xbf, 0x2e, 0xc0, 0x74, 0x44, 0xc5, 0x98, 0x19, 0xe3, 0x30, 0x96, - 0x69, 0x47, 0xf9, 0x77, 0x61, 0x44, 0x57, 0xdd, 0xcf, 0xad, 0xde, 0x3b, 0xcd, 0xf0, 0x65, 0x5e, - 0x3a, 0xe5, 0x7e, 0x13, 0x66, 0x99, 0xf8, 0xf7, 0x5d, 0x70, 0x12, 0x5c, 0x14, 0xbe, 0x18, 0xbc, - 0xee, 0xdd, 0x8b, 0x51, 0x71, 0x82, 0xfb, 0xd6, 0x23, 0x98, 0x89, 0x29, 0x1b, 0xeb, 0x99, 0xe5, - 0x3f, 0x2a, 0xf0, 0xf5, 0xa1, 0xfb, 0x74, 0xd4, 0x8c, 0x2d, 0x92, 0x7a, 0x62, 0x91, 0xac, 0xe4, - 0x03, 0xbc, 0xbc, 0xe7, 0x3a, 0xcd, 0x37, 0x3f, 0xff, 0x62, 0xe5, 0xda, 0xcf, 0xbe, 0x58, 0xb9, - 0xf6, 0xf3, 0x2f, 0x56, 0xae, 0xfd, 0xee, 0xf9, 0x8a, 0xf2, 0xf9, 0xf9, 0x8a, 0xf2, 0xb3, 0xf3, - 0x15, 0xe5, 0xe7, 0xe7, 0x2b, 0xca, 0x7f, 0x9c, 0xaf, 0x28, 0x7f, 0xf2, 0xe5, 0xca, 0xb5, 0x8f, - 0xca, 0x12, 0xee, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xa7, 0xd7, 0xb5, 0x56, 0x5c, 0x3d, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 1a9b7ebb..6c90cb3c 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -22,7 +22,6 @@ syntax = 'proto2'; package k8s.io.api.extensions.v1beta1; import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -31,6 +30,12 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +message AllowedCSIDriver { + // Name is the registered name of the CSI driver + optional string name = 1; +} + // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. // Deprecated: use AllowedFlexVolume from policy API Group instead. message AllowedFlexVolume { @@ -45,7 +50,7 @@ message AllowedHostPath { // pathPrefix is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -56,42 +61,17 @@ message AllowedHostPath { optional bool readOnly = 2; } -message CustomMetricCurrentStatus { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricCurrentStatusList { - repeated CustomMetricCurrentStatus items = 1; -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -message CustomMetricTarget { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricTargetList { - repeated CustomMetricTarget items = 1; -} - // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -99,7 +79,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -128,7 +108,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -335,6 +315,8 @@ message DeploymentSpec { // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. + // This is set to the max value of int32 (i.e. 2147483647) by default, which + // means "retaining all old RelicaSets". // +optional optional int32 revisionHistoryLimit = 6; @@ -495,19 +477,20 @@ message IPBlock { // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. +// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. message Ingress { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; } @@ -524,7 +507,7 @@ message IngressBackend { // IngressList is a collection of Ingress. message IngressList { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -621,7 +604,7 @@ message IngressTLS { // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -666,7 +649,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the + // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; @@ -676,7 +659,7 @@ message NetworkPolicyIngressRule { // Network Policy List is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -688,7 +671,7 @@ message NetworkPolicyList { message NetworkPolicyPeer { // This is a label selector which selects Pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. - // + // // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. @@ -697,7 +680,7 @@ message NetworkPolicyPeer { // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. - // + // // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. @@ -756,7 +739,7 @@ message NetworkPolicySpec { repeated NetworkPolicyEgressRule egress = 3; // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -774,7 +757,7 @@ message NetworkPolicySpec { // Deprecated: use PodSecurityPolicy from policy API Group instead. message PodSecurityPolicy { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -787,7 +770,7 @@ message PodSecurityPolicy { // Deprecated: use PodSecurityPolicyList from policy API Group instead. message PodSecurityPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -847,6 +830,12 @@ message PodSecurityPolicySpec { // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. optional RunAsUserStrategyOptions runAsUser = 11; + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + optional RunAsGroupStrategyOptions runAsGroup = 22; + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. optional SupplementalGroupsStrategyOptions supplementalGroups = 12; @@ -882,11 +871,16 @@ message PodSecurityPolicySpec { // +optional repeated AllowedFlexVolume allowedFlexVolumes = 18; + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // +optional + repeated AllowedCSIDriver allowedCSIDrivers = 23; + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // + // // Examples: // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. @@ -896,7 +890,7 @@ message PodSecurityPolicySpec { // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // + // // Examples: // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. @@ -908,6 +902,12 @@ message PodSecurityPolicySpec { // This requires the ProcMountType feature flag to be enabled. // +optional repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; } // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. 
See the release notes for @@ -916,12 +916,12 @@ message PodSecurityPolicySpec { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -929,7 +929,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -958,7 +958,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1081,11 +1081,23 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. +message RunAsGroupStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. // Deprecated: use RunAsUserStrategyOptions from policy API Group instead. message RunAsUserStrategyOptions { @@ -1098,6 +1110,21 @@ message RunAsUserStrategyOptions { repeated IDRange ranges = 2; } +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. 
An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + optional string defaultRuntimeClassName = 2; +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. // Deprecated: use SELinuxStrategyOptions from policy API Group instead. message SELinuxStrategyOptions { @@ -1112,15 +1139,15 @@ message SELinuxStrategyOptions { // represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 38e112d1..eb255341 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -18,8 +18,7 @@ package v1beta1 import ( appsv1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -50,22 +49,20 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient -// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -77,29 +74,6 @@ type ReplicationControllerDummy struct { metav1.TypeMeta `json:",inline"` } -// Alpha-level support for Custom Metrics in HPA (as annotations). -type CustomMetricTarget struct { - // Custom Metric name. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Custom Metric value (average). - TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -type CustomMetricTargetList struct { - Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` -} - -type CustomMetricCurrentStatus struct { - // Custom Metric name. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Custom Metric value (average). - CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` -} - // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale @@ -151,6 +125,8 @@ type DeploymentSpec struct { // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. + // This is set to the max value of int32 (i.e. 2147483647) by default, which + // means "retaining all old RelicaSets". // +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` @@ -253,7 +229,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -513,12 +489,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -526,7 +502,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -550,7 +526,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -565,20 +541,21 @@ type DaemonSetList struct { // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. +// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -589,7 +566,7 @@ type Ingress struct { type IngressList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -740,12 +717,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -753,7 +730,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -764,7 +741,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -868,7 +845,7 @@ type ReplicaSetCondition struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -918,6 +895,11 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -946,6 +928,10 @@ type PodSecurityPolicySpec struct { // is allowed in the "volumes" field. // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // +optional + AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. 
@@ -970,6 +956,11 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -994,7 +985,7 @@ type AllowedHostPath struct { // Deprecated: use FSType from policy API Group instead. type FSType string -var ( +const ( AzureFile FSType = "azureFile" Flocker FSType = "flocker" FlexVolume FSType = "flexVolume" @@ -1016,6 +1007,7 @@ var ( ConfigMap FSType = "configMap" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" + CSI FSType = "csi" All FSType = "*" ) @@ -1026,6 +1018,12 @@ type AllowedFlexVolume struct { Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +type AllowedCSIDriver struct { + // Name is the registered name of the CSI driver + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. // Deprecated: use HostPortRange from policy API Group instead. @@ -1072,6 +1070,17 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. +type RunAsGroupStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + // IDRange provides a min/max of an allowed range of IDs. // Deprecated: use IDRange from policy API Group instead. type IDRange struct { @@ -1098,6 +1107,23 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// Security Context. +// Deprecated: use RunAsGroupStrategy from policy API Group instead. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when RunAsGroup are specified, they have to fall in the defined range. + RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. 
+ // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. // Deprecated: use FSGroupStrategyOptions from policy API Group instead. type FSGroupStrategyOptions struct { @@ -1150,6 +1176,25 @@ const ( SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +type RuntimeClassStrategyOptions struct { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. +const AllowAllRuntimeClassNames = "*" + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. @@ -1157,7 +1202,7 @@ const ( type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1165,6 +1210,7 @@ type PodSecurityPolicyList struct { Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } +// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. @@ -1172,7 +1218,7 @@ type PodSecurityPolicyList struct { type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1223,7 +1269,7 @@ type NetworkPolicySpec struct { Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. 
@@ -1250,7 +1296,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the + // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` @@ -1342,7 +1388,7 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index cdbc490a..a7eb2ec9 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -27,6 +27,15 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllowedCSIDriver = map[string]string{ + "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", + "name": "Name is the registered name of the CSI driver", +} + +func (AllowedCSIDriver) SwaggerDoc() map[string]string { + return map_AllowedCSIDriver +} + var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", "driver": "driver is the name of the Flexvolume driver.", @@ -46,30 +55,11 @@ func (AllowedHostPath) SwaggerDoc() map[string]string { return map_AllowedHostPath } -var map_CustomMetricCurrentStatus = map[string]string{ - "name": "Custom Metric name.", - "value": "Custom Metric value (average).", -} - -func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string { - return map_CustomMetricCurrentStatus -} - -var map_CustomMetricTarget = map[string]string{ - "": "Alpha-level support for Custom Metrics in HPA (as annotations).", - "name": "Custom Metric name.", - "value": "Custom Metric value (average).", -} - -func (CustomMetricTarget) SwaggerDoc() map[string]string { - return map_CustomMetricTarget -} - var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -91,7 +81,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -114,7 +104,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -193,7 +183,7 @@ var map_DeploymentSpec = map[string]string{ "template": "Template describes the pods that will be created.", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old RelicaSets\".", "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. 
Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".", @@ -289,10 +279,10 @@ func (IPBlock) SwaggerDoc() map[string]string { } var map_Ingress = map[string]string{ - "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -311,7 +301,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } @@ -368,7 +358,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -389,7 +379,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. 
Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -398,7 +388,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -432,7 +422,7 @@ var map_NetworkPolicySpec = map[string]string{ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. 
If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -441,7 +431,7 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -451,7 +441,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of schema objects.", } @@ -472,6 +462,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. 
This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -479,9 +470,11 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -490,9 +483,9 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -514,7 +507,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -577,13 +570,23 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. 
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", @@ -594,6 +597,16 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", @@ -606,9 +619,9 @@ func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. 
Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 65801c23..cb610179 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,113 +28,49 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. -func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { +func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. -func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. +func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { if in == nil { return nil } - out := new(CustomMetricCurrentStatusList) + out := new(AllowedCSIDriver) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in - out.TargetValue = in.TargetValue.DeepCopy() return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. -func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(CustomMetricTarget) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { +func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. -func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. +func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { if in == nil { return nil } - out := new(CustomMetricTargetList) + out := new(AllowedHostPath) in.DeepCopyInto(out) return out } @@ -188,7 +124,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -344,7 +280,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -659,7 +595,7 @@ func (in *IngressBackend) DeepCopy() *IngressBackend { func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) @@ -890,7 +826,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) @@ -1043,7 +979,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -1102,6 +1038,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != 
nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -1124,6 +1065,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } + if in.AllowedCSIDrivers != nil { + in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers + *out = make([]AllowedCSIDriver, len(*in)) + copy(*out, *in) + } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) @@ -1139,6 +1085,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } return } @@ -1201,7 +1152,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -1368,6 +1319,27 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. +func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in @@ -1389,6 +1361,32 @@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go new file mode 100644 index 00000000..31b8b5d5 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=flowcontrol.apiserver.k8s.io + +// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". +package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1" diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go new file mode 100644 index 00000000..d44ec3c9 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go @@ -0,0 +1,5459 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{0} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{1} +} +func (m *FlowSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{2} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m *FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{3} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList 
proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{4} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{5} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{6} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{7} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func 
(*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{8} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{9} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{10} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) +} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{11} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() 
+} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{12} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{13} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func (*PriorityLevelConfigurationReference) ProtoMessage() {} +func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{14} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func 
(*PriorityLevelConfigurationSpec) ProtoMessage() {} +func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{15} +} +func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src) +} +func (m *PriorityLevelConfigurationSpec) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } +func (*PriorityLevelConfigurationStatus) ProtoMessage() {} +func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{16} +} +func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src) +} +func (m *PriorityLevelConfigurationStatus) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo + +func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } +func (*QueuingConfiguration) ProtoMessage() {} +func (*QueuingConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{17} +} +func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QueuingConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuingConfiguration.Merge(m, src) +} +func (m *QueuingConfiguration) XXX_Size() int { + return m.Size() +} +func (m *QueuingConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo + +func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } +func (*ResourcePolicyRule) ProtoMessage() {} +func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{18} +} +func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePolicyRule.Merge(m, src) +} +func (m 
*ResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *ResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo + +func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } +func (*ServiceAccountSubject) ProtoMessage() {} +func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{19} +} +func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountSubject.Merge(m, src) +} +func (m *ServiceAccountSubject) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountSubject) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{20} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *UserSubject) Reset() { *m = UserSubject{} } +func (*UserSubject) ProtoMessage() {} +func (*UserSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_45ba024d525b289b, []int{21} +} +func (m *UserSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSubject.Merge(m, src) +} +func (m *UserSubject) XXX_Size() int { + return m.Size() +} +func (m *UserSubject) XXX_DiscardUnknown() { + xxx_messageInfo_UserSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSubject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitResponse") + 
proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1alpha1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.UserSubject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto", fileDescriptor_45ba024d525b289b) +} + +var fileDescriptor_45ba024d525b289b = []byte{ + // 1502 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4d, 0x6f, 0xdb, 0x46, + 0x13, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x59, 0x27, 0xb0, 0xe0, 0x00, 0x92, 0xc3, 0x17, 0x78, + 0x93, 0xf7, 0x4d, 0x42, 0xc6, 0x69, 0x92, 0xa6, 0x08, 0x8a, 0xc0, 0x74, 0xda, 0x7c, 0xd9, 0xae, + 0xbd, 0x4e, 0x52, 0x34, 0x48, 0x81, 0xd0, 0xd4, 0x5a, 0xda, 0x58, 0x22, 0x59, 0x2e, 0xa9, 0xd4, + 0x45, 0x0e, 0x05, 0xfa, 0x07, 0xfa, 0x03, 0x72, 0xec, 0xa1, 0xe7, 0xfe, 0x82, 0x1e, 0x8d, 0xa2, + 0x87, 0x1c, 0x73, 0x12, 0x62, 0xf5, 0x5a, 0xf4, 0xdc, 0xe6, 0x54, 0xec, 0x72, 0x49, 0x8a, 0xfa, + 0xb0, 0x94, 0x1a, 0xc8, 0xa9, 0x37, 0x71, 0x3e, 0x9e, 0xd9, 0x99, 0x9d, 0x99, 0x7d, 0x04, 0x77, + 0xf6, 0xae, 0x33, 0x8d, 0x3a, 0xfa, 0x5e, 0xb0, 0x43, 0x3c, 0x9b, 0xf8, 0x84, 0xe9, 0x0d, 0x62, + 0x97, 0x1d, 0x4f, 0x97, 0x0a, 0xd3, 0xa5, 0xfa, 0x6e, 0xcd, 0x79, 0x6e, 0x39, 0xb6, 0xef, 0x39, + 0x35, 0xbd, 0xb1, 0x6c, 0xd6, 0xdc, 0xaa, 0xb9, 0xac, 0x57, 0x88, 0x4d, 0x3c, 0xd3, 0x27, 0x65, + 0xcd, 0xf5, 0x1c, 0xdf, 0x41, 0xa5, 0xd0, 0x41, 0x33, 0x5d, 0xaa, 0xb5, 0x39, 0x68, 0x91, 0xc3, + 0xe2, 0xc5, 0x0a, 0xf5, 0xab, 0xc1, 0x8e, 0x66, 0x39, 0x75, 0xbd, 0xe2, 0x54, 0x1c, 0x5d, 0xf8, + 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x21, 0xde, 0xe2, 0x95, 0xe4, 0x00, 0x75, 0xd3, + 0xaa, 0x52, 0x9b, 0x78, 0xfb, 0xba, 0xbb, 0x57, 0xe1, 0x02, 0xa6, 0xd7, 0x89, 0x6f, 0xea, 0x8d, + 0xae, 0x53, 0x2c, 0xea, 0xfd, 0xbc, 0xbc, 0xc0, 0xf6, 0x69, 0x9d, 0x74, 0x39, 0x5c, 0x1b, 0xe4, + 0xc0, 0xac, 0x2a, 0xa9, 0x9b, 0x9d, 0x7e, 0xea, 0x63, 0x58, 0xf8, 0xb4, 0xe6, 0x3c, 0xbf, 0x45, + 0x99, 0x4f, 0xed, 0x4a, 0x40, 0x59, 0x95, 0x78, 0xeb, 0xc4, 0xaf, 0x3a, 0x65, 0x74, 0x13, 0xb2, + 0xfe, 0xbe, 
0x4b, 0x0a, 0xca, 0x92, 0x72, 0x2e, 0x6f, 0x9c, 0x3f, 0x68, 0x96, 0x46, 0x5a, 0xcd, + 0x52, 0xf6, 0xc1, 0xbe, 0x4b, 0xde, 0x36, 0x4b, 0xa7, 0xfb, 0xb8, 0x71, 0x35, 0x16, 0x8e, 0xea, + 0xcb, 0x0c, 0x00, 0xb7, 0xda, 0x16, 0xa1, 0xd1, 0x53, 0x98, 0xe0, 0xe9, 0x96, 0x4d, 0xdf, 0x14, + 0x98, 0x93, 0x97, 0x2f, 0x69, 0x49, 0xb1, 0xe3, 0x53, 0x6b, 0xee, 0x5e, 0x85, 0x0b, 0x98, 0xc6, + 0xad, 0xb5, 0xc6, 0xb2, 0xf6, 0xd9, 0xce, 0x33, 0x62, 0xf9, 0xeb, 0xc4, 0x37, 0x0d, 0x24, 0x4f, + 0x01, 0x89, 0x0c, 0xc7, 0xa8, 0x68, 0x0b, 0xb2, 0xcc, 0x25, 0x56, 0x21, 0x23, 0xd0, 0x75, 0x6d, + 0xc0, 0x55, 0x6a, 0xc9, 0xe1, 0xb6, 0x5d, 0x62, 0x19, 0x53, 0x51, 0x8a, 0xfc, 0x0b, 0x0b, 0x28, + 0xf4, 0x05, 0x8c, 0x31, 0xdf, 0xf4, 0x03, 0x56, 0x18, 0x15, 0xa0, 0xcb, 0xef, 0x02, 0x2a, 0x1c, + 0x8d, 0x19, 0x09, 0x3b, 0x16, 0x7e, 0x63, 0x09, 0xa8, 0xbe, 0xce, 0xc0, 0x7c, 0x62, 0xbc, 0xea, + 0xd8, 0x65, 0xea, 0x53, 0xc7, 0x46, 0x37, 0x52, 0x75, 0x3f, 0xdb, 0x51, 0xf7, 0x85, 0x1e, 0x2e, + 0x49, 0xcd, 0xd1, 0x47, 0xf1, 0x79, 0x33, 0xc2, 0xfd, 0x4c, 0x3a, 0xf8, 0xdb, 0x66, 0x69, 0x36, + 0x76, 0x4b, 0x9f, 0x07, 0x35, 0x00, 0xd5, 0x4c, 0xe6, 0x3f, 0xf0, 0x4c, 0x9b, 0x85, 0xb0, 0xb4, + 0x4e, 0x64, 0xda, 0xff, 0x1f, 0xee, 0xa6, 0xb8, 0x87, 0xb1, 0x28, 0x43, 0xa2, 0xb5, 0x2e, 0x34, + 0xdc, 0x23, 0x02, 0xfa, 0x2f, 0x8c, 0x79, 0xc4, 0x64, 0x8e, 0x5d, 0xc8, 0x8a, 0x23, 0xc7, 0xf5, + 0xc2, 0x42, 0x8a, 0xa5, 0x16, 0xfd, 0x0f, 0xc6, 0xeb, 0x84, 0x31, 0xb3, 0x42, 0x0a, 0x39, 0x61, + 0x38, 0x2b, 0x0d, 0xc7, 0xd7, 0x43, 0x31, 0x8e, 0xf4, 0xea, 0xcf, 0x0a, 0xcc, 0x24, 0x75, 0x5a, + 0xa3, 0xcc, 0x47, 0x4f, 0xba, 0xba, 0x4f, 0x1b, 0x2e, 0x27, 0xee, 0x2d, 0x7a, 0x6f, 0x4e, 0x86, + 0x9b, 0x88, 0x24, 0x6d, 0x9d, 0xb7, 0x09, 0x39, 0xea, 0x93, 0x3a, 0xaf, 0xfa, 0xe8, 0xb9, 0xc9, + 0xcb, 0xe7, 0xdf, 0xa1, 0x4b, 0x8c, 0x69, 0x89, 0x9b, 0xbb, 0xcb, 0x11, 0x70, 0x08, 0xa4, 0xfe, + 0x3e, 0xda, 0x9e, 0x02, 0xef, 0x48, 0xf4, 0xa3, 0x02, 0x8b, 0xae, 0x47, 0x1d, 0x8f, 0xfa, 0xfb, + 0x6b, 0xa4, 0x41, 0x6a, 0xab, 0x8e, 0xbd, 0x4b, 0x2b, 0x81, 0x67, 0xf2, 0x5a, 0xca, 0xac, 0x6e, + 0x0d, 0x0c, 0xbd, 0xd9, 0x17, 0x02, 0x93, 0x5d, 0xe2, 0x11, 0xdb, 0x22, 0x86, 0x2a, 0xcf, 0xb4, + 0x78, 0x84, 0xf1, 0x11, 0x67, 0x41, 0xf7, 0x00, 0xd5, 0x4d, 0x9f, 0xd7, 0xb4, 0xb2, 0xe9, 0x11, + 0x8b, 0x94, 0x39, 0xaa, 0x68, 0xc9, 0x5c, 0xd2, 0x1f, 0xeb, 0x5d, 0x16, 0xb8, 0x87, 0x17, 0xfa, + 0x4e, 0x81, 0xf9, 0x72, 0xf7, 0xa2, 0x91, 0x9d, 0x79, 0x7d, 0xa8, 0x52, 0xf7, 0x58, 0x54, 0xc6, + 0x42, 0xab, 0x59, 0x9a, 0xef, 0xa1, 0xc0, 0xbd, 0xa2, 0xa1, 0x2f, 0x21, 0xe7, 0x05, 0x35, 0xc2, + 0x0a, 0x59, 0x71, 0xc3, 0x83, 0xc3, 0x6e, 0x3a, 0x35, 0x6a, 0xed, 0x63, 0xee, 0xf3, 0x39, 0xf5, + 0xab, 0xdb, 0x81, 0xd8, 0x58, 0x2c, 0xb9, 0x6e, 0xa1, 0xc2, 0x21, 0xaa, 0xfa, 0x02, 0xe6, 0x3a, + 0x17, 0x07, 0xaa, 0x02, 0x58, 0xd1, 0xac, 0xb2, 0x82, 0x22, 0xe2, 0x5e, 0x79, 0x87, 0xce, 0x8a, + 0x07, 0x3d, 0x59, 0x9b, 0xb1, 0x88, 0xe1, 0x36, 0x6c, 0xf5, 0x12, 0x4c, 0xdd, 0xf6, 0x9c, 0xc0, + 0x95, 0x87, 0x44, 0x4b, 0x90, 0xb5, 0xcd, 0x7a, 0xb4, 0x82, 0xe2, 0xbd, 0xb8, 0x61, 0xd6, 0x09, + 0x16, 0x1a, 0xf5, 0x07, 0x05, 0xa6, 0xd7, 0x68, 0x9d, 0xfa, 0x98, 0x30, 0xd7, 0xb1, 0x19, 0x41, + 0x57, 0x53, 0x6b, 0xeb, 0x4c, 0xc7, 0xda, 0x3a, 0x91, 0x32, 0x6e, 0x5b, 0x58, 0x4f, 0x60, 0xfc, + 0xab, 0x80, 0x04, 0xd4, 0xae, 0xc8, 0xb5, 0x7d, 0x75, 0x60, 0x86, 0x5b, 0xa1, 0x7d, 0xaa, 0xe3, + 0x8c, 0x49, 0xbe, 0x08, 0xa4, 0x06, 0x47, 0x90, 0xea, 0x1f, 0x0a, 0x9c, 0x11, 0x91, 0x49, 0xb9, + 0x7f, 0x27, 0xa3, 0x27, 0x50, 0x30, 0x19, 0x0b, 0x3c, 0x52, 0x5e, 0x75, 0x6c, 0x2b, 0xf0, 0xf8, + 0x0c, 0xec, 0x6f, 0x57, 0x4d, 0x8f, 
0x30, 0x91, 0x4e, 0xce, 0x58, 0x92, 0xe9, 0x14, 0x56, 0xfa, + 0xd8, 0xe1, 0xbe, 0x08, 0x68, 0x0f, 0xa6, 0x6b, 0xed, 0xc9, 0xcb, 0x3c, 0xb5, 0x81, 0x79, 0xa6, + 0x4a, 0x66, 0x9c, 0x92, 0x47, 0x48, 0x97, 0x1d, 0xa7, 0xb1, 0xd5, 0xe7, 0x70, 0x6a, 0x83, 0x0f, + 0x32, 0x73, 0x02, 0xcf, 0x22, 0x49, 0x0f, 0xa2, 0x12, 0xe4, 0x1a, 0xc4, 0xdb, 0x09, 0xfb, 0x28, + 0x6f, 0xe4, 0x79, 0x07, 0x3e, 0xe2, 0x02, 0x1c, 0xca, 0xd1, 0xc7, 0x30, 0x6b, 0x27, 0x9e, 0x0f, + 0xf1, 0x1a, 0x2b, 0x8c, 0x09, 0xd3, 0xf9, 0x56, 0xb3, 0x34, 0xbb, 0x91, 0x56, 0xe1, 0x4e, 0x5b, + 0xf5, 0x30, 0x03, 0x0b, 0x7d, 0x5a, 0x1e, 0x3d, 0x82, 0x09, 0x26, 0x7f, 0xcb, 0x36, 0x3e, 0x37, + 0x30, 0x79, 0xe9, 0x9c, 0x6c, 0xdd, 0x08, 0x0d, 0xc7, 0x58, 0xc8, 0x85, 0x69, 0x4f, 0x9e, 0x41, + 0x04, 0x95, 0xdb, 0xf7, 0x83, 0x81, 0xe0, 0xdd, 0xf5, 0x49, 0xca, 0x8b, 0xdb, 0x11, 0x71, 0x3a, + 0x00, 0x7a, 0x01, 0x73, 0x6d, 0x89, 0x87, 0x41, 0x47, 0x45, 0xd0, 0x6b, 0x03, 0x83, 0xf6, 0xbc, + 0x17, 0xa3, 0x20, 0xe3, 0xce, 0x6d, 0x74, 0xe0, 0xe2, 0xae, 0x48, 0xea, 0xaf, 0x19, 0x38, 0x62, + 0x21, 0xbf, 0x07, 0x82, 0x65, 0xa6, 0x08, 0xd6, 0xcd, 0x63, 0x3c, 0x35, 0x7d, 0x09, 0x17, 0xed, + 0x20, 0x5c, 0x2b, 0xc7, 0x09, 0x72, 0x34, 0x01, 0xfb, 0x33, 0x03, 0xff, 0xe9, 0xef, 0x9c, 0x10, + 0xb2, 0xfb, 0xa9, 0xcd, 0xf6, 0x61, 0xc7, 0x66, 0x3b, 0x3b, 0x04, 0xc4, 0xbf, 0x04, 0xad, 0x83, + 0xa0, 0xbd, 0x51, 0xa0, 0xd8, 0xbf, 0x6e, 0xef, 0x81, 0xb0, 0x3d, 0x4d, 0x13, 0xb6, 0x1b, 0xc7, + 0xe8, 0xb2, 0x3e, 0x04, 0xee, 0xf6, 0x51, 0xcd, 0x15, 0x33, 0xad, 0x21, 0x9e, 0xda, 0x83, 0x23, + 0x6b, 0x25, 0x98, 0xe1, 0x80, 0xbf, 0x0c, 0x29, 0xef, 0x4f, 0x6c, 0x73, 0xa7, 0x46, 0xea, 0xc4, + 0xf6, 0x65, 0x47, 0x52, 0x18, 0xaf, 0x85, 0x4f, 0xa4, 0x9c, 0x6b, 0x63, 0xb8, 0x97, 0xe9, 0xa8, + 0x27, 0x35, 0x7c, 0x8e, 0xa5, 0x19, 0x8e, 0xf0, 0xd5, 0x97, 0x0a, 0x2c, 0x0d, 0x1a, 0x57, 0xf4, + 0x75, 0x0f, 0xda, 0x73, 0x1c, 0x56, 0x3b, 0x3c, 0x0d, 0xfa, 0x49, 0x81, 0x93, 0xbd, 0xc8, 0x05, + 0x9f, 0x00, 0xce, 0x28, 0x62, 0x3a, 0x10, 0x4f, 0xc0, 0x96, 0x90, 0x62, 0xa9, 0x45, 0x17, 0x60, + 0xa2, 0x6a, 0xda, 0xe5, 0x6d, 0xfa, 0x4d, 0x44, 0x76, 0xe3, 0x1e, 0xbc, 0x23, 0xe5, 0x38, 0xb6, + 0x40, 0xb7, 0x60, 0x4e, 0xf8, 0xad, 0x11, 0xbb, 0xe2, 0x57, 0x45, 0xb1, 0xc4, 0x34, 0xe7, 0x92, + 0x47, 0x61, 0xab, 0x43, 0x8f, 0xbb, 0x3c, 0xd4, 0xbf, 0x14, 0x40, 0xff, 0xe4, 0xbd, 0x3f, 0x0f, + 0x79, 0xd3, 0xa5, 0x82, 0xf6, 0x85, 0x53, 0x90, 0x37, 0xa6, 0x5b, 0xcd, 0x52, 0x7e, 0x65, 0xf3, + 0x6e, 0x28, 0xc4, 0x89, 0x9e, 0x1b, 0x47, 0x0f, 0x61, 0xf8, 0xe0, 0x49, 0xe3, 0x28, 0x30, 0xc3, + 0x89, 0x1e, 0x5d, 0x87, 0x29, 0xab, 0x16, 0x30, 0x9f, 0x78, 0xdb, 0x96, 0xe3, 0x12, 0xb1, 0x35, + 0x26, 0x8c, 0x93, 0x32, 0xa7, 0xa9, 0xd5, 0x36, 0x1d, 0x4e, 0x59, 0x22, 0x0d, 0x80, 0xb7, 0x3c, + 0x73, 0x4d, 0x1e, 0x27, 0x27, 0xe2, 0xcc, 0xf0, 0x0b, 0xdb, 0x88, 0xa5, 0xb8, 0xcd, 0x42, 0x7d, + 0x06, 0xa7, 0xb6, 0x89, 0xd7, 0xa0, 0x16, 0x59, 0xb1, 0x2c, 0x27, 0xb0, 0xfd, 0x88, 0xc0, 0xea, + 0x90, 0x8f, 0xcd, 0xe4, 0x54, 0x9c, 0x90, 0xf1, 0xf3, 0x31, 0x16, 0x4e, 0x6c, 0xe2, 0x31, 0xcc, + 0xf4, 0x1d, 0xc3, 0x5f, 0x32, 0x30, 0x9e, 0xc0, 0x67, 0xf7, 0xa8, 0x5d, 0x96, 0xc8, 0xa7, 0x23, + 0xeb, 0xfb, 0xd4, 0x2e, 0xbf, 0x6d, 0x96, 0x26, 0xa5, 0x19, 0xff, 0xc4, 0xc2, 0x10, 0xdd, 0x83, + 0x6c, 0xc0, 0x88, 0x27, 0x07, 0xec, 0xc2, 0xc0, 0x6e, 0x7e, 0xc8, 0x88, 0x17, 0x31, 0xa0, 0x09, + 0x0e, 0xcd, 0x05, 0x58, 0x60, 0xa0, 0x0d, 0xc8, 0x55, 0xf8, 0xad, 0xc8, 0xcd, 0x7f, 0x71, 0x20, + 0x58, 0x3b, 0xb5, 0x0f, 0x1b, 0x41, 0x48, 0x70, 0x08, 0x83, 0x3c, 0x98, 0x61, 0xa9, 0x22, 0x8a, + 0x0b, 0x1b, 0x86, 0xd1, 0xf4, 0xac, 0xbd, 0x81, 0x5a, 0xcd, 
0xd2, 0x4c, 0x5a, 0x85, 0x3b, 0x22, + 0xa8, 0x3a, 0x4c, 0xb6, 0xa5, 0x38, 0x78, 0x09, 0x1a, 0xda, 0xc1, 0x61, 0x71, 0xe4, 0xd5, 0x61, + 0x71, 0xe4, 0xf5, 0x61, 0x71, 0xe4, 0xdb, 0x56, 0x51, 0x39, 0x68, 0x15, 0x95, 0x57, 0xad, 0xa2, + 0xf2, 0xba, 0x55, 0x54, 0xde, 0xb4, 0x8a, 0xca, 0xf7, 0xbf, 0x15, 0x47, 0x1e, 0x4f, 0x44, 0x47, + 0xfb, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xb3, 0x17, 0x48, 0x11, 0x14, 0x00, 0x00, +} + +func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowDistinguisherMethod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowDistinguisherMethod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchema) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DistinguisherMethod != nil { + { + size, err := m.DistinguisherMethod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MatchingPrecedence)) + i-- + dAtA[i] = 0x10 + { + size, err := m.PriorityLevelConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GroupSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa 
+ return len(dAtA) - i, nil +} + +func (m *LimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Queuing != nil { + { + size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRulesWithSubjects) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRulesWithSubjects) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRulesWithSubjects) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceRules) > 0 { + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limited != nil { + { + size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueuingConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuingConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i 
= encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HandSize)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Queues)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccount != nil { + { + size, err := m.ServiceAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FlowDistinguisherMethod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PriorityLevelConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MatchingPrecedence)) + if m.DistinguisherMethod != nil { + l = m.DistinguisherMethod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Queuing != nil { + l = m.Queuing.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitedPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + l = m.LimitResponse.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRulesWithSubjects) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfigurationReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Limited != nil { + l = m.Limited.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityLevelConfigurationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QueuingConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Queues)) + n += 1 + sovGenerated(uint64(m.HandSize)) + n += 1 + sovGenerated(uint64(m.QueueLengthLimit)) + return n +} + +func (m *ResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 
0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccount != nil { + l = m.ServiceAccount.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FlowDistinguisherMethod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowDistinguisherMethod{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchema) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchema{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "FlowSchemaSpec", "FlowSchemaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "FlowSchemaStatus", "FlowSchemaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchemaCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]FlowSchema{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "FlowSchema", "FlowSchema", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&FlowSchemaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRulesWithSubjects{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRulesWithSubjects", "PolicyRulesWithSubjects", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := 
strings.Join([]string{`&FlowSchemaSpec{`, + `PriorityLevelConfiguration:` + strings.Replace(strings.Replace(this.PriorityLevelConfiguration.String(), "PriorityLevelConfigurationReference", "PriorityLevelConfigurationReference", 1), `&`, ``, 1) + `,`, + `MatchingPrecedence:` + fmt.Sprintf("%v", this.MatchingPrecedence) + `,`, + `DistinguisherMethod:` + strings.Replace(this.DistinguisherMethod.String(), "FlowDistinguisherMethod", "FlowDistinguisherMethod", 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]FlowSchemaCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "FlowSchemaCondition", "FlowSchemaCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&FlowSchemaStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *GroupSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LimitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitResponse{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitedPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, + `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRulesWithSubjects) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + repeatedStringForResourceRules := "[]ResourcePolicyRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourcePolicyRule", "ResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourcePolicyRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourcePolicyRule", "NonResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&PolicyRulesWithSubjects{`, + `Subjects:` + repeatedStringForSubjects + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfiguration) String() string 
{ + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PriorityLevelConfigurationSpec", "PriorityLevelConfigurationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PriorityLevelConfigurationStatus", "PriorityLevelConfigurationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityLevelConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfiguration", "PriorityLevelConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PriorityLevelConfigurationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfigurationCondition", "PriorityLevelConfigurationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *QueuingConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueuingConfiguration{`, + `Queues:` + fmt.Sprintf("%v", this.Queues) + `,`, + `HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`, + `QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", 
this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountSubject{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `User:` + strings.Replace(this.User.String(), "UserSubject", "UserSubject", 1) + `,`, + `Group:` + strings.Replace(this.Group.String(), "GroupSubject", "GroupSubject", 1) + `,`, + `ServiceAccount:` + strings.Replace(this.ServiceAccount.String(), "ServiceAccountSubject", "ServiceAccountSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowDistinguisherMethod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowDistinguisherMethod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowDistinguisherMethodType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowSchemaConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, FlowSchema{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field PriorityLevelConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: FlowSchemaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitResponse: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + } + m.AssuredConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 
0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 
0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, 
ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto new file mode 100644 index 00000000..6134b5e6 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -0,0 +1,436 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.flowcontrol.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. 
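Aside (illustrative only, not part of the vendored patch): the generated Unmarshal methods and skipGenerated above all decode protobuf field tags and integers with the same base-128 varint loop. A minimal standalone sketch of that loop, with a hypothetical helper name and nothing beyond the standard library, is:

package main

import (
	"errors"
	"fmt"
)

// readUvarint mirrors the shift/mask loop used by the generated code: each
// byte contributes its low 7 bits, and a byte below 0x80 ends the value.
func readUvarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected end of input")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
}

func main() {
	// 0x0A encodes field number 1 with wire type 2 (length-delimited): 1<<3 | 2.
	wire, n, _ := readUvarint([]byte{0x0A})
	fmt.Println(wire>>3, wire&0x7, n) // prints: 1 2 1
}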
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. + // +listType=set + repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1; + + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be non-negative. + // Note that if the precedence is not specified or zero, it will be set to 1000 as default. + // +optional + optional int32 matchingPrecedence = 2; + + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + optional FlowDistinguisherMethod distinguisherMethod = 3; + + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=set + // +optional + repeated PolicyRulesWithSubjects rules = 4; +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +message FlowSchemaStatus { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + repeated FlowSchemaCondition conditions = 1; +} + +// GroupSubject holds detailed information for group-kind subject. +message GroupSubject { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + optional string name = 1; +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +message LimitResponse { + // `type` is "Queue" or "Reject". 
+ // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + optional QueuingConfiguration queuing = 2; +} + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +message LimitedPriorityLevelConfiguration { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. + // +optional + optional int32 assuredConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. 
+ // +listType=set + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=set + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=set + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + // +listType=set + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. + optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. 
A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + optional int32 queues = 1; + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + optional int32 handSize = 2; + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + optional int32 queueLengthLimit = 3; +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +message ResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + repeated string apiGroups = 2; + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. 
+ // +listType=set + repeated string resources = 3; + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + optional bool clusterScope = 4; + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // Required + // +unionDiscriminator + optional string kind = 1; + + // +optional + optional UserSubject user = 2; + + // +optional + optional GroupSubject group = 3; + + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + optional string name = 1; +} + diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go new file mode 100644 index 00000000..0c8a9cc5 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go new file mode 100644 index 00000000..41073bdc --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -0,0 +1,513 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +type FlowSchema struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a FlowSchema. 
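Aside (illustrative only, not part of the vendored patch): a consumer installs this API group into a scheme the same way as any other group, via the AddToScheme helper registered above. A minimal sketch, assuming only the k8s.io packages already present in this vendor tree:

package main

import (
	"fmt"

	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Build a scheme and install the flowcontrol/v1alpha1 types into it.
	scheme := runtime.NewScheme()
	if err := flowcontrolv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now recognizes FlowSchema, FlowSchemaList, PriorityLevelConfiguration
	// and PriorityLevelConfigurationList under flowcontrol.apiserver.k8s.io/v1alpha1.
	fmt.Println(scheme.Recognizes(flowcontrolv1alpha1.SchemeGroupVersion.WithKind("FlowSchema"))) // true
}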
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// FlowSchemaList is a list of FlowSchema objects. +type FlowSchemaList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // `items` is a list of FlowSchemas. + // +listType=set + Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +type FlowSchemaSpec struct { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"` + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be non-negative. + // Note that if the precedence is not specified or zero, it will be set to 1000 as default. + // +optional + MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"` + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"` + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=set + // +optional + Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` +} + +// FlowDistinguisherMethodType is the type of flow distinguisher method +type FlowDistinguisherMethodType string + +// These are valid flow-distinguisher methods. +const ( + // FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request. + // This type is used to provide some insulation between users. + FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser" + + // FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the + // object that the request acts upon. If the object is not namespaced, or if the request is a non-resource + // request, then the distinguisher will be the empty string. 
An example usage of this type is to provide + // some insulation between tenants in a situation where there are multiple tenants and each namespace + // is dedicated to a tenant. + FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace" +) + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +type FlowDistinguisherMethod struct { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"` +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +type PriorityLevelConfigurationReference struct { + // `name` is the name of the priority level configuration being referenced + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=set + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=set + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=set + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +type Subject struct { + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject's kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. 
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. +type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) least one member of +// namespaces matches the request. +type ResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"` + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"` + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"` +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. 
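Aside (illustrative values, not part of the vendored file): given the ResourcePolicyRule type above, a rule that matches reads of pods and services in any named namespace could be written as follows; the values are hypothetical, only the field names and the NamespaceEvery constant come from this file.

package main

import (
	flowcontrol "k8s.io/api/flowcontrol/v1alpha1"
)

func main() {
	// Matches get/list/watch of pods and services when a namespace is
	// specified; requests that specify no namespace are not matched,
	// because ClusterScope is left false.
	rule := flowcontrol.ResourcePolicyRule{
		Verbs:      []string{"get", "list", "watch"},
		APIGroups:  []string{""}, // the core API group
		Resources:  []string{"pods", "services"},
		Namespaces: []string{flowcontrol.NamespaceEvery}, // "*"
	}
	_ = rule
}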
A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +type NonResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityLevelConfiguration represents the configuration of a priority level. +type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + // +listType=set + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +type PriorityLevelConfigurationSpec struct { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` +} + +// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level +type PriorityLevelEnablement string + +// Supported priority level enablement values. +const ( + // PriorityLevelEnablementExempt means that requests are not subject to limits + PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt" + + // PriorityLevelEnablementLimited means that requests are subject to limits + PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited" +) + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// * How are requests for this priority level limited? +// * What should be done with requests that exceed the limit? +type LimitedPriorityLevelConfiguration struct { + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: + // + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. 
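Aside (illustrative numbers, not part of the vendored file): as a concrete reading of the ACV formula above, with a server concurrency limit SCL = 600 and three Limited priority levels whose assuredConcurrencyShares are 30, 30 and 40, the assured concurrency values are ceil(600*30/100) = 180, 180 and ceil(600*40/100) = 240 concurrent requests respectively.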
+ // +optional + AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + + // `limitResponse` indicates what to do with requests that can not be executed right now + LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +type LimitResponse struct { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"` +} + +// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now +type LimitResponseType string + +// Supported limit responses. +const ( + // LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit + LimitResponseTypeQueue LimitResponseType = "Queue" + + // LimitResponseTypeReject means that requests that can not be executed right now are rejected + LimitResponseTypeReject LimitResponseType = "Reject" +) + +// QueuingConfiguration holds the configuration parameters for queuing +type QueuingConfiguration struct { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"` + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"` + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. 
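Aside (illustrative sketch, not part of the vendored file): with the defaults above, queues = 64 and handSize = 8, each flow's identifier is hashed to a "hand" of 8 of the 64 queues, and the request joins the shortest queue in that hand. A minimal sketch of that last step, with hypothetical names:

package main

import "fmt"

// shortestInHand returns the queue index, among the dealt hand, whose queue
// is currently shortest; ties go to the earliest entry in the hand.
func shortestInHand(hand []int, queueLen []int) int {
	best := hand[0]
	for _, q := range hand[1:] {
		if queueLen[q] < queueLen[best] {
			best = q
		}
	}
	return best
}

func main() {
	queueLen := make([]int, 64) // current lengths of the 64 queues
	queueLen[3], queueLen[17], queueLen[42] = 5, 2, 9
	hand := []int{3, 17, 42, 7, 11, 23, 51, 60} // 8 queues dealt for this flow
	fmt.Println(shortestInHand(hand, queueLen)) // prints 7, the first empty queue in the hand
}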
+ // +optional + QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"` +} + +// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type +type PriorityLevelConfigurationConditionType string + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +type PriorityLevelConfigurationStatus struct { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +optional + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +type PriorityLevelConfigurationCondition struct { + // `type` is the type of the condition. + // Required. + Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..08380a1b --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (FlowSchema) SwaggerDoc() map[string]string { + return map_FlowSchema +} + +var map_FlowSchemaCondition = map[string]string{ + "": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (FlowSchemaCondition) SwaggerDoc() map[string]string { + return map_FlowSchemaCondition +} + +var map_FlowSchemaList = map[string]string{ + "": "FlowSchemaList is a list of FlowSchema objects.", + "metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "`items` is a list of FlowSchemas.", +} + +func (FlowSchemaList) SwaggerDoc() map[string]string { + return map_FlowSchemaList +} + +var map_FlowSchemaSpec = map[string]string{ + "": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be non-negative. Note that if the precedence is not specified or zero, it will be set to 1000 as default.", + "distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. 
`nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.", +} + +func (FlowSchemaSpec) SwaggerDoc() map[string]string { + return map_FlowSchemaSpec +} + +var map_FlowSchemaStatus = map[string]string{ + "": "FlowSchemaStatus represents the current state of a FlowSchema.", + "conditions": "`conditions` is a list of the current states of FlowSchema.", +} + +func (FlowSchemaStatus) SwaggerDoc() map[string]string { + return map_FlowSchemaStatus +} + +var map_GroupSubject = map[string]string{ + "": "GroupSubject holds detailed information for group-kind subject.", + "name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. 
For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. 
If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) least one member of namespaces matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "Required", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..f5d37954 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,541 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. +func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. +func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. +func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. +func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. +func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. +func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. +func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. 
+func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. +func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index ef9ae2ae..d3ffd5ed 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -15,6 +15,8 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io + package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 7b1c04b2..c9b22fc7 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -14,42 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto - - It has these top-level messages: - IPBlock - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -62,39 +48,229 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{0} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{1} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{2} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) +} -func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptor_1c72867a70a7cc90, []int{3} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{4} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{5} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{6} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func 
(*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{7} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func init() { proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") @@ -106,10 +282,70 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptor_1c72867a70a7cc90) +} + +var fileDescriptor_1c72867a70a7cc90 = []byte{ + // 804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, + 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, + 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, + 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, + 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, + 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, + 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, + 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, + 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, + 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, + 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, + 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, + 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, + 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, + 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 
0x34, 0x42, 0x3a, + 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, + 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, + 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, + 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, + 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, + 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, + 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, + 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, + 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, + 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, + 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, + 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, + 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, + 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, + 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, + 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, + 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, + 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, + 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, + 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, + 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, + 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, + 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, + 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, + 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, + 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, + 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, + 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, + 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, + 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, + 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, + 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, + 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, + 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, + 0x67, 0x08, 0x00, 0x00, +} + func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -117,36 +353,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for _, s := range m.Except { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -154,33 +390,42 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -188,41 +433,50 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -230,41 +484,50 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -272,37 +535,46 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyPeer) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -310,47 +582,58 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n4, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n5, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x12 } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n6, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -358,33 +641,41 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n7, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x12 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -392,88 +683,80 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n8, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Egress) > 0 { - for _, msg := range m.Egress { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.CIDR) @@ -488,6 +771,9 @@ func (m *IPBlock) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -498,6 +784,9 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -516,6 +805,9 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -534,6 +826,9 @@ func (m *NetworkPolicyIngressRule) 
Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -548,6 +843,9 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PodSelector != nil { @@ -566,6 +864,9 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Protocol != nil { @@ -580,6 +881,9 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.PodSelector.Size() @@ -606,14 +910,7 @@ func (m *NetworkPolicySpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -634,7 +931,7 @@ func (this *NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -644,9 +941,19 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, `}`, }, "") return s @@ -655,9 +962,19 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 
1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, `}`, }, "") return s @@ -666,9 +983,14 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -678,9 +1000,9 @@ func (this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -691,7 +1013,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -700,10 +1022,20 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + 
`PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -732,7 +1064,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -760,7 +1092,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -770,6 +1102,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -789,7 +1124,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -799,6 +1134,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -813,6 +1151,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -840,7 +1181,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -868,7 +1209,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -877,6 +1218,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -898,7 +1242,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -907,6 +1251,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -923,6 +1270,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -950,7 +1300,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -978,7 +1328,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -987,6 +1337,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1009,7 +1362,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1018,6 +1371,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1035,6 +1391,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1062,7 +1421,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1090,7 +1449,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1099,6 +1458,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1121,7 +1483,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1130,6 +1492,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1147,6 +1512,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1174,7 +1542,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1202,7 +1570,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1211,6 +1579,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1232,7 +1603,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1241,6 +1612,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1258,6 +1632,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -1285,7 +1662,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1313,7 +1690,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1322,11 +1699,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.PodSelector = &v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1346,7 +1726,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1355,11 +1735,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1379,7 +1762,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1388,6 +1771,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1407,6 +1793,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1434,7 +1823,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1462,7 +1851,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1472,6 +1861,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1492,7 +1884,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1501,11 +1893,14 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.Port = &intstr.IntOrString{} } if err := 
m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1520,6 +1915,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1547,7 +1945,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1575,7 +1973,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1584,6 +1982,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1605,7 +2006,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1614,6 +2015,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1636,7 +2040,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1645,6 +2049,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1667,7 +2074,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1677,6 +2084,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1691,6 +2101,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1757,10 +2170,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1789,6 +2205,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1807,62 +2226,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 
0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, - 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, - 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, - 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, - 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, - 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, - 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, - 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, - 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, - 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, - 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, - 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, - 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, - 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, - 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 0x34, 0x42, 0x3a, - 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, - 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, - 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, - 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, - 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, - 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, - 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, - 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, - 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, - 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, - 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, - 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, - 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, - 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, - 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, - 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, - 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, - 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, - 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, - 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, - 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, - 
0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, - 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, - 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, - 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, - 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, - 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, - 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, - 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, - 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, - 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, - 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, - 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, - 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, - 0x67, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 4e068d08..3cb73804 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -48,7 +48,7 @@ message IPBlock { // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -92,7 +92,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule + // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; @@ -101,7 +101,7 @@ message NetworkPolicyIngressRule { // NetworkPolicyList is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -114,7 +114,7 @@ message NetworkPolicyList { message NetworkPolicyPeer { // This is a label selector which selects Pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. - // + // // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. 
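For readers skimming the hunk above: the comments describe how podSelector and namespaceSelector combine on a NetworkPolicyPeer. Below is a minimal sketch, not part of the patch, assuming the vendored k8s.io/api/networking/v1 and apimachinery metav1 types; the role=db and env=prod labels are illustrative placeholders only.

package example

import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildPeer returns a peer with both selectors set: it matches pods labeled
// role=db, but only in namespaces labeled env=prod, per the semantics in the
// generated.proto comments above. Leaving NamespaceSelector nil would instead
// scope the pod selector to the policy's own namespace.
func buildPeer() networkingv1.NetworkPolicyPeer {
	return networkingv1.NetworkPolicyPeer{
		PodSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"role": "db"}, // illustrative label
		},
		NamespaceSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"env": "prod"}, // illustrative label
		},
	}
}
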
@@ -123,7 +123,7 @@ message NetworkPolicyPeer { // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. - // + // // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. @@ -180,7 +180,7 @@ message NetworkPolicySpec { repeated NetworkPolicyEgressRule egress = 3; // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index ce70448d..38a640f0 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -29,7 +29,7 @@ import ( type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type NetworkPolicySpec struct { Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -107,7 +107,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule + // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` @@ -194,7 +194,7 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index f4363bc0..188b72a1 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -39,7 +39,7 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -60,7 +60,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -69,7 +69,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -103,7 +103,7 @@ var map_NetworkPolicySpec = map[string]string{ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. 
In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). 
This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index d1e4e884..1833e978 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -139,7 +139,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) diff --git a/vendor/k8s.io/api/networking/v1beta1/doc.go b/vendor/k8s.io/api/networking/v1beta1/doc.go new file mode 100644 index 00000000..12d3d4ff --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=networking.k8s.io + +package v1beta1 // import "k8s.io/api/networking/v1beta1" diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go new file mode 100644 index 00000000..8ed56009 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -0,0 +1,2394 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{0} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{1} +} +func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{2} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} + +var xxx_messageInfo_Ingress proto.InternalMessageInfo + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{3} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func 
(*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{4} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressList proto.InternalMessageInfo + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{5} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRule proto.InternalMessageInfo + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{6} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{7} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{8} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{9} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo + +func init() { + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_5bea11de0ceb8f53) +} + +var fileDescriptor_5bea11de0ceb8f53 = []byte{ + // 812 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, + 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, + 0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, + 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, + 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, + 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, + 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, + 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9, + 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, + 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, + 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, + 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 
0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, + 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, + 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, + 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, + 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, + 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, + 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, + 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, + 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, + 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, + 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, + 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, + 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, + 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, + 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, + 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, + 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, + 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, + 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, + 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, + 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, + 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, + 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 0xbe, + 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, + 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, + 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, + 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, + 0xd5, 0x07, 0xb2, 0x78, 0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, + 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, + 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, + 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, + 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, + 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, + 0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, + 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, + 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, + 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 
0x47, 0xab, + 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, + 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, +} + +func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Ingress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HTTP != nil { + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IngressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if 
len(m.TLS) > 0 { + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IngressStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressTLS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 
1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + 
} + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + 
panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto new file mode 100644 index 00000000..a72545c8 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -0,0 +1,186 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.networking.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +message HTTPIngressPath { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + optional string path = 1; + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + optional IngressBackend backend = 2; +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +message HTTPIngressRuleValue { + // A collection of paths that map requests to backends. + repeated HTTPIngressPath paths = 1; +} + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +message Ingress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the desired state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IngressSpec spec = 2; + + // Status is the current state of the Ingress. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IngressStatus status = 3; +} + +// IngressBackend describes all endpoints for a given service and port. +message IngressBackend { + // Specifies the name of the referenced service. + optional string serviceName = 1; + + // Specifies the port of the referenced service. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; +} + +// IngressList is a collection of Ingress. +message IngressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Ingress. + repeated Ingress items = 2; +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +message IngressRule { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + optional string host = 1; + + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + optional IngressRuleValue ingressRuleValue = 2; +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +message IngressRuleValue { + // +optional + optional HTTPIngressRuleValue http = 1; +} + +// IngressSpec describes the Ingress the user wishes to exist. +message IngressSpec { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + optional IngressBackend backend = 1; + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + repeated IngressTLS tls = 2; + + // A list of host rules used to configure the Ingress. 
If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + repeated IngressRule rules = 3; +} + +// IngressStatus describe the current state of the Ingress. +message IngressStatus { + // LoadBalancer contains the current status of the load-balancer. + // +optional + optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; +} + +// IngressTLS describes the transport layer security associated with an Ingress. +message IngressTLS { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + repeated string hosts = 1; + + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + optional string secretName = 2; +} + diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go new file mode 100644 index 00000000..c046c490 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder holds functions that add things to a scheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + + // AddToScheme adds the types of this group into the given scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Ingress{}, + &IngressList{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go new file mode 100644 index 00000000..37277bf8 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -0,0 +1,192 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +type Ingress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is the desired state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Ingress. + Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. 
If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. +type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. 
+ // +optional + IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` + + // Specifies the port of the referenced service. + ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..4ae5e32d --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,127 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_HTTPIngressPath = map[string]string{ + "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.", + "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", +} + +func (HTTPIngressPath) SwaggerDoc() map[string]string { + return map_HTTPIngressPath +} + +var map_HTTPIngressRuleValue = map[string]string{ + "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", + "paths": "A collection of paths that map requests to backends.", +} + +func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { + return map_HTTPIngressRuleValue +} + +var map_Ingress = map[string]string{ + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressBackend = map[string]string{ + "": "IngressBackend describes all endpoints for a given service and port.", + "serviceName": "Specifies the name of the referenced service.", + "servicePort": "Specifies the port of the referenced service.", +} + +func (IngressBackend) SwaggerDoc() map[string]string { + return map_IngressBackend +} + +var map_IngressList = map[string]string{ + "": "IngressList is a collection of Ingress.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of Ingress.", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressRule = map[string]string{ + "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", +} + +func (IngressRule) SwaggerDoc() map[string]string { + return map_IngressRule +} + +var map_IngressRuleValue = map[string]string{ + "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", +} + +func (IngressRuleValue) SwaggerDoc() map[string]string { + return map_IngressRuleValue +} + +var map_IngressSpec = map[string]string{ + "": "IngressSpec describes the Ingress the user wishes to exist.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "": "IngressStatus describe the current state of the Ingress.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_IngressTLS = map[string]string{ + "": "IngressTLS describes the transport layer security associated with an Ingress.", + "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. 
If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", +} + +func (IngressTLS) SwaggerDoc() map[string]string { + return map_IngressTLS +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..16ac936a --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,252 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { + *out = *in + out.Backend = in.Backend + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath. +func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath { + if in == nil { + return nil + } + out := new(HTTPIngressPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue. +func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { + if in == nil { + return nil + } + out := new(HTTPIngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
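For orientation, here is a minimal sketch of how the networking/v1beta1 Ingress types vendored above are typically populated. This is illustrative only and not part of the vendored sources; the object name, host, path, and service name ("demo", "example.com", "/", "web") are hypothetical.

package main

import (
    "fmt"

    "k8s.io/api/networking/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
    // Route example.com/ to port 80 of a hypothetical service "web".
    ing := v1beta1.Ingress{
        ObjectMeta: metav1.ObjectMeta{Name: "demo"},
        Spec: v1beta1.IngressSpec{
            Rules: []v1beta1.IngressRule{{
                Host: "example.com",
                IngressRuleValue: v1beta1.IngressRuleValue{
                    HTTP: &v1beta1.HTTPIngressRuleValue{
                        Paths: []v1beta1.HTTPIngressPath{{
                            Path: "/",
                            Backend: v1beta1.IngressBackend{
                                ServiceName: "web",
                                ServicePort: intstr.FromInt(80),
                            },
                        }},
                    },
                },
            }},
        },
    }
    fmt.Println(ing.Spec.Rules[0].Host)
}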
+func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { + *out = *in + out.ServicePort = in.ServicePort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend. +func (in *IngressBackend) DeepCopy() *IngressBackend { + if in == nil { + return nil + } + out := new(IngressBackend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRule) DeepCopyInto(out *IngressRule) { + *out = *in + in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. +func (in *IngressRule) DeepCopy() *IngressRule { + if in == nil { + return nil + } + out := new(IngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) { + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue. +func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { + if in == nil { + return nil + } + out := new(IngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressTLS) DeepCopyInto(out *IngressTLS) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS. +func (in *IngressTLS) DeepCopy() *IngressTLS { + if in == nil { + return nil + } + out := new(IngressTLS) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1alpha1/doc.go new file mode 100644 index 00000000..dfe99540 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=node.k8s.io + +package v1alpha1 // import "k8s.io/api/node/v1alpha1" diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go new file mode 100644 index 00000000..34f4dd6d --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -0,0 +1,1609 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } +func (*RuntimeClassSpec) ProtoMessage() {} +func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{3} +} +func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassSpec.Merge(m, src) +} +func (m *RuntimeClassSpec) XXX_Size() int { + return m.Size() +} +func (m 
*RuntimeClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassSpec proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{4} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1alpha1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1alpha1.Overhead.PodFixedEntry") + proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") + proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1alpha1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1alpha1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_82a78945ab308218) +} + +var fileDescriptor_82a78945ab308218 = []byte{ + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbf, 0x6f, 0xd3, 0x4e, + 0x14, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x5f, 0x7f, 0x2b, 0x14, 0x65, 0x70, 0x2a, 0x0b, + 0xa1, 0x0a, 0xa9, 0x67, 0x5a, 0xa1, 0xaa, 0x62, 0x00, 0x61, 0x7e, 0x08, 0x44, 0x69, 0xc1, 0x2d, + 0x0b, 0x62, 0xe0, 0x62, 0x3f, 0x1c, 0x13, 0xdb, 0x67, 0xd9, 0xe7, 0x88, 0x6c, 0x88, 0x05, 0x89, + 0x89, 0x89, 0xff, 0x06, 0xe6, 0x8e, 0x9d, 0x50, 0xa7, 0x96, 0x86, 0xff, 0x81, 0x81, 0x09, 0x9d, + 0x7d, 0x4e, 0x9c, 0xa4, 0xa1, 0x61, 0xf3, 0xdd, 0x7d, 0x7e, 0xdc, 0xfb, 0xbc, 0x7b, 0xc6, 0x77, + 0x3b, 0x3b, 0x31, 0x71, 0x99, 0xde, 0x49, 0x5a, 0x10, 0x05, 0xc0, 0x21, 0xd6, 0xbb, 0x10, 0xd8, + 0x2c, 0xd2, 0xe5, 0x01, 0x0d, 0x5d, 0x3d, 0x60, 0x36, 0xe8, 0xdd, 0x4d, 0xea, 0x85, 0x6d, 0xba, + 0xa9, 0x3b, 0x10, 0x40, 0x44, 0x39, 0xd8, 0x24, 0x8c, 0x18, 0x67, 0x4a, 0x3d, 0x43, 0x12, 0x1a, + 0xba, 0x44, 0x20, 0x49, 0x8e, 0x6c, 0x6c, 0x38, 0x2e, 0x6f, 0x27, 0x2d, 0x62, 0x31, 0x5f, 0x77, + 0x98, 0xc3, 0xf4, 0x94, 0xd0, 0x4a, 0xde, 0xa4, 0xab, 0x74, 0x91, 0x7e, 0x65, 0x42, 0x0d, 0xad, + 0x60, 0x69, 0xb1, 0x48, 0x58, 0x8e, 0x9b, 0x35, 0x6e, 0x0e, 0x31, 0x3e, 0xb5, 0xda, 0x6e, 0x00, + 0x51, 0x4f, 0x0f, 0x3b, 0x4e, 0x4a, 0x8a, 0x20, 0x66, 0x49, 0x64, 0xc1, 0x3f, 0xb1, 0x62, 0xdd, + 0x07, 0x4e, 0x2f, 0xf2, 0xd2, 0xa7, 0xb1, 0xa2, 0x24, 0xe0, 0xae, 0x3f, 0x69, 0xb3, 0x7d, 0x19, + 0x21, 0xb6, 0xda, 0xe0, 0xd3, 0x71, 0x9e, 0x76, 0x5c, 0xc6, 0x95, 0xfd, 0x2e, 0x44, 0x6d, 0xa0, + 0xb6, 0xf2, 0x1d, 0xe1, 0x4a, 0xc8, 0xec, 0x87, 0xee, 0x3b, 0xb0, 0xeb, 0x68, 0x6d, 0x6e, 0xbd, + 0xba, 0x75, 0x83, 0x4c, 0x8b, 0x98, 0xe4, 0x34, 0xf2, 0x4c, 0x52, 0x1e, 0x04, 0x3c, 0xea, 0x19, + 0x1f, 0xd1, 0xd1, 0x69, 
0xb3, 0xd4, 0x3f, 0x6d, 0x56, 0xf2, 0xfd, 0xdf, 0xa7, 0xcd, 0xe6, 0x64, + 0xbe, 0xc4, 0x94, 0x91, 0xed, 0xba, 0x31, 0xff, 0x70, 0xf6, 0x57, 0xc8, 0x1e, 0xf5, 0xe1, 0xd3, + 0x59, 0x73, 0x63, 0x96, 0x0e, 0x90, 0xe7, 0x09, 0x0d, 0xb8, 0xcb, 0x7b, 0xe6, 0xa0, 0x96, 0x46, + 0x07, 0x2f, 0x8d, 0x5c, 0x52, 0x59, 0xc1, 0x73, 0x1d, 0xe8, 0xd5, 0xd1, 0x1a, 0x5a, 0x5f, 0x34, + 0xc5, 0xa7, 0x72, 0x1f, 0x2f, 0x74, 0xa9, 0x97, 0x40, 0xbd, 0xbc, 0x86, 0xd6, 0xab, 0x5b, 0xa4, + 0x50, 0xf7, 0xc0, 0x8b, 0x84, 0x1d, 0x27, 0x0d, 0x62, 0xd2, 0x2b, 0x23, 0xdf, 0x2a, 0xef, 0x20, + 0xed, 0x1b, 0xc2, 0x35, 0x33, 0x4b, 0xfd, 0x9e, 0x47, 0xe3, 0x58, 0x79, 0x8d, 0x2b, 0xa2, 0xcf, + 0x36, 0xe5, 0x34, 0x75, 0x1c, 0x4d, 0x75, 0x42, 0x3d, 0x26, 0x02, 0x4d, 0xba, 0x9b, 0x64, 0xbf, + 0xf5, 0x16, 0x2c, 0xfe, 0x14, 0x38, 0x35, 0x14, 0x19, 0x2a, 0x1e, 0xee, 0x99, 0x03, 0x55, 0x65, + 0x17, 0xcf, 0xc7, 0x21, 0x58, 0xf2, 0xee, 0xd7, 0xa7, 0xf7, 0xac, 0x78, 0xaf, 0x83, 0x10, 0x2c, + 0xa3, 0x26, 0x75, 0xe7, 0xc5, 0xca, 0x4c, 0x55, 0xb4, 0xaf, 0x08, 0xaf, 0x14, 0x81, 0xa2, 0x41, + 0xca, 0xab, 0x89, 0x22, 0xc8, 0x6c, 0x45, 0x08, 0x76, 0x5a, 0xc2, 0x4a, 0xfe, 0x2e, 0xf2, 0x9d, + 0x42, 0x01, 0x4f, 0xf0, 0x82, 0xcb, 0xc1, 0x8f, 0xeb, 0xe5, 0xf4, 0xd5, 0x5d, 0x9b, 0xad, 0x02, + 0x63, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x33, 0xd3, 0xd0, 0x7e, 0x8d, 0xdd, 0x5f, 0x94, 0xa6, + 0xdc, 0xc6, 0xcb, 0x72, 0x14, 0x1e, 0xd1, 0xc0, 0xf6, 0x20, 0xca, 0x9a, 0x6f, 0x5c, 0x91, 0x12, + 0xcb, 0xe6, 0xc8, 0xa9, 0x39, 0x86, 0x56, 0x76, 0x71, 0x85, 0xc9, 0x07, 0x2f, 0x63, 0xd6, 0x2e, + 0x1f, 0x0d, 0xa3, 0x26, 0xea, 0xcd, 0x57, 0xe6, 0x40, 0x41, 0x39, 0xc4, 0x58, 0x0c, 0xa4, 0x9d, + 0x78, 0x6e, 0xe0, 0xd4, 0xe7, 0x52, 0xbd, 0xab, 0xd3, 0xf5, 0x0e, 0x06, 0x58, 0x63, 0x59, 0x3c, + 0x82, 0xe1, 0xda, 0x2c, 0xe8, 0x68, 0x5f, 0xca, 0xb8, 0x70, 0xa4, 0x84, 0xb8, 0x26, 0x64, 0x0e, + 0xc0, 0x03, 0x8b, 0xb3, 0x48, 0x4e, 0xf4, 0xf6, 0x2c, 0x36, 0x64, 0xaf, 0x40, 0xcc, 0xe6, 0x7a, + 0x55, 0x06, 0x55, 0x2b, 0x1e, 0x99, 0x23, 0x0e, 0xca, 0x0b, 0x5c, 0xe5, 0xcc, 0x13, 0x3f, 0x18, + 0x97, 0x05, 0x79, 0x33, 0xd5, 0xa2, 0xa1, 0x98, 0x6c, 0xf1, 0x2a, 0x0e, 0x07, 0x30, 0xe3, 0x7f, + 0x29, 0x5c, 0x1d, 0xee, 0xc5, 0x66, 0x51, 0xa7, 0x71, 0x07, 0xff, 0x37, 0x71, 0x9f, 0x0b, 0x46, + 0x78, 0xb5, 0x38, 0xc2, 0x8b, 0x85, 0x91, 0x34, 0xc8, 0xd1, 0xb9, 0x5a, 0x3a, 0x3e, 0x57, 0x4b, + 0x27, 0xe7, 0x6a, 0xe9, 0x7d, 0x5f, 0x45, 0x47, 0x7d, 0x15, 0x1d, 0xf7, 0x55, 0x74, 0xd2, 0x57, + 0xd1, 0x8f, 0xbe, 0x8a, 0x3e, 0xff, 0x54, 0x4b, 0x2f, 0x2b, 0x79, 0x10, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xa8, 0x77, 0xef, 0x80, 0x9b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuntimeClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RuntimeHandler) + n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) +} +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassSpec{`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 
{ + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto new file mode 100644 index 00000000..ac05f839 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -0,0 +1,118 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.node.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map podFixed = 1; +} + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the RuntimeClass + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + optional RuntimeClassSpec spec = 2; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. 
+ repeated RuntimeClass items = 2; +} + +// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters +// that are required to describe the RuntimeClass to the Container Runtime +// Interface (CRI) implementation, as well as any other components that need to +// understand how the pod will be run. The RuntimeClassSpec is immutable. +message RuntimeClassSpec { + // RuntimeHandler specifies the underlying runtime and configuration that the + // CRI implementation will use to handle pods of this class. The possible + // values are specific to the node & CRI configuration. It is assumed that + // all handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements + // and is immutable. + optional string runtimeHandler = 1; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 2; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 3; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; +} + diff --git a/vendor/k8s.io/api/node/v1alpha1/register.go b/vendor/k8s.io/api/node/v1alpha1/register.go new file mode 100644 index 00000000..b6082142 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go new file mode 100644 index 00000000..b5976710 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -0,0 +1,116 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the RuntimeClass + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters +// that are required to describe the RuntimeClass to the Container Runtime +// Interface (CRI) implementation, as well as any other components that need to +// understand how the pod will be run. 
The RuntimeClassSpec is immutable. +type RuntimeClassSpec struct { + // RuntimeHandler specifies the underlying runtime and configuration that the + // CRI implementation will use to handle pods of this class. The possible + // values are specific to the node & CRI configuration. It is assumed that + // all handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements + // and is immutable. + RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,3,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. 
+ Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..39000017 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_RuntimeClassSpec = map[string]string{ + "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. 
The RuntimeClassSpec is immutable.", + "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", +} + +func (RuntimeClassSpec) SwaggerDoc() map[string]string { + return map_RuntimeClassSpec +} + +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..20f80518 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,165 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
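An illustrative sketch (the "gvisor" name, "runsc" handler, "120Mi" figure, and "sandbox" label are hypothetical, not taken from this patch) of how the v1alpha1 types above compose when building a RuntimeClass object in Go:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A cluster-scoped RuntimeClass: RuntimeHandler names a CRI handler,
	// Overhead charges a fixed per-pod resource amount, and Scheduling
	// restricts pods of this class to nodes matching the selector.
	rc := nodev1alpha1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gvisor"},
		Spec: nodev1alpha1.RuntimeClassSpec{
			RuntimeHandler: "runsc",
			Overhead: &nodev1alpha1.Overhead{
				PodFixed: corev1.ResourceList{
					corev1.ResourceMemory: resource.MustParse("120Mi"),
				},
			},
			Scheduling: &nodev1alpha1.Scheduling{
				NodeSelector: map[string]string{"sandbox": "gvisor"},
			},
		},
	}
	fmt.Println(rc.Spec.RuntimeHandler)
}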
+func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { + if in == nil { + return nil + } + out := new(RuntimeClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RuntimeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { + if in == nil { + return nil + } + out := new(RuntimeClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) { + *out = *in + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassSpec. +func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec { + if in == nil { + return nil + } + out := new(RuntimeClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
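Because NodeSelector, Tolerations, PodFixed, and the nested Spec pointers are reference types, the generated deepcopy helpers in this file are what make copies safe to mutate independently. A small sketch, assuming a hypothetical caller:

package main

import (
	"fmt"

	nodev1alpha1 "k8s.io/api/node/v1alpha1"
)

func main() {
	orig := &nodev1alpha1.Scheduling{
		NodeSelector: map[string]string{"kubernetes.io/arch": "amd64"},
	}
	cp := orig.DeepCopy()               // allocates a fresh NodeSelector map
	cp.NodeSelector["sandbox"] = "yes"  // mutating the copy...
	fmt.Println(len(orig.NodeSelector)) // ...leaves the original with 1 entry
}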
+func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go new file mode 100644 index 00000000..e87583ce --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=node.k8s.io + +package v1beta1 // import "k8s.io/api/node/v1beta1" diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go new file mode 100644 index 00000000..63992f43 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -0,0 +1,1438 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
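The v1beta1 generated.pb.go that begins here carries hand-rolled gogo/protobuf marshalers rather than reflection-based ones. A round-trip sketch using the generated Marshal/Unmarshal methods (hypothetical caller; "runc" is only an example handler value):

package main

import (
	"fmt"

	nodev1beta1 "k8s.io/api/node/v1beta1"
)

func main() {
	in := &nodev1beta1.RuntimeClass{Handler: "runc"}

	// Marshal and Unmarshal are the generated methods defined in this file.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	out := &nodev1beta1.RuntimeClass{}
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.Handler) // prints "runc"
}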
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{3} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1beta1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1beta1.Overhead.PodFixedEntry") + 
proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1beta1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1beta1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_f977b0dddc93b4ec) +} + +var fileDescriptor_f977b0dddc93b4ec = []byte{ + // 666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xbd, 0x6f, 0xd3, 0x40, + 0x14, 0xcf, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x14, 0x53, 0x89, 0x28, 0x83, 0x53, 0x82, 0x90, 0xca, + 0xd0, 0x33, 0xad, 0x00, 0x55, 0x2c, 0x20, 0xf3, 0x21, 0x3e, 0x5b, 0x70, 0x61, 0x41, 0x0c, 0x5c, + 0xec, 0x87, 0x63, 0x12, 0xfb, 0xa2, 0xf3, 0x39, 0x22, 0x1b, 0x62, 0x41, 0x62, 0x62, 0xe1, 0xbf, + 0x81, 0xbd, 0x1b, 0x5d, 0x90, 0x3a, 0xb5, 0x34, 0xfc, 0x17, 0x4c, 0xe8, 0xec, 0x73, 0x72, 0x6d, + 0x9a, 0xb6, 0x6c, 0xbe, 0xf3, 0xef, 0xe3, 0xbd, 0xdf, 0xbb, 0x87, 0xef, 0xb4, 0xd7, 0x62, 0x12, + 0x30, 0xab, 0x9d, 0x34, 0x81, 0x47, 0x20, 0x20, 0xb6, 0x7a, 0x10, 0x79, 0x8c, 0x5b, 0xea, 0x07, + 0xed, 0x06, 0x56, 0xc4, 0x3c, 0xb0, 0x7a, 0x2b, 0x4d, 0x10, 0x74, 0xc5, 0xf2, 0x21, 0x02, 0x4e, + 0x05, 0x78, 0xa4, 0xcb, 0x99, 0x60, 0xc6, 0xc5, 0x0c, 0x48, 0x68, 0x37, 0x20, 0x12, 0x48, 0x14, + 0xb0, 0xb6, 0xec, 0x07, 0xa2, 0x95, 0x34, 0x89, 0xcb, 0x42, 0xcb, 0x67, 0x3e, 0xb3, 0x52, 0x7c, + 0x33, 0x79, 0x97, 0x9e, 0xd2, 0x43, 0xfa, 0x95, 0xe9, 0xd4, 0x1a, 0x9a, 0xa1, 0xcb, 0xb8, 0x34, + 0x3c, 0xec, 0x55, 0xbb, 0x3e, 0xc2, 0x84, 0xd4, 0x6d, 0x05, 0x11, 0xf0, 0xbe, 0xd5, 0x6d, 0xfb, + 0x29, 0x89, 0x43, 0xcc, 0x12, 0xee, 0xc2, 0x7f, 0xb1, 0x62, 0x2b, 0x04, 0x41, 0x8f, 0xf2, 0xb2, + 0x26, 0xb1, 0x78, 0x12, 0x89, 0x20, 0x1c, 0xb7, 0xb9, 0x79, 0x12, 0x21, 0x76, 0x5b, 0x10, 0xd2, + 0xc3, 0xbc, 0xc6, 0xcf, 0x22, 0x2e, 0x6d, 0xf4, 0x80, 0xb7, 0x80, 0x7a, 0xc6, 0x2f, 0x84, 0x4b, + 0x5d, 0xe6, 0x3d, 0x08, 0x3e, 0x80, 0x57, 0x45, 0x8b, 0x53, 0x4b, 0xe5, 0x55, 0x8b, 0x4c, 0x48, + 0x98, 0xe4, 0x2c, 0xf2, 0x5c, 0x31, 0xee, 0x47, 0x82, 0xf7, 0xed, 0xcf, 0x68, 0x6b, 0xb7, 0x5e, + 0x18, 0xec, 0xd6, 0x4b, 0xf9, 0xfd, 0xdf, 0xdd, 0x7a, 0x7d, 0x3c, 0x5e, 0xe2, 0xa8, 0xc4, 0x9e, + 0x06, 0xb1, 0xf8, 0xb4, 0x77, 0x2c, 0x64, 0x9d, 0x86, 0xf0, 0x65, 0xaf, 0xbe, 0x7c, 0x9a, 0x01, + 0x90, 0x17, 0x09, 0x8d, 0x44, 0x20, 0xfa, 0xce, 0xb0, 0x95, 0x5a, 0x1b, 0xcf, 0x1d, 0x28, 0xd2, + 0x98, 0xc7, 0x53, 0x6d, 0xe8, 0x57, 0xd1, 0x22, 0x5a, 0x9a, 0x75, 0xe4, 0xa7, 0x71, 0x0f, 0x4f, + 0xf7, 0x68, 0x27, 0x81, 0x6a, 0x71, 0x11, 0x2d, 0x95, 0x57, 0x89, 0xd6, 0xf6, 0xd0, 0x8b, 0x74, + 0xdb, 0x7e, 0x9a, 0xc3, 0xb8, 0x57, 0x46, 0xbe, 0x55, 0x5c, 0x43, 0x8d, 0x1f, 0x45, 0x5c, 0x71, + 0xb2, 0xd0, 0xef, 0x76, 0x68, 0x1c, 0x1b, 0x6f, 0x71, 0x49, 0x8e, 0xd9, 0xa3, 0x82, 0xa6, 0x8e, + 0xe5, 0xd5, 0x6b, 0xc7, 0xa9, 0xc7, 0x44, 0xa2, 0x49, 0x6f, 0x85, 0x6c, 0x34, 0xdf, 0x83, 0x2b, + 0x9e, 0x81, 0xa0, 0xb6, 0xa1, 0x42, 0xc5, 0xa3, 0x3b, 0x67, 0xa8, 0x6a, 0x5c, 0xc5, 0x33, 0x2d, + 0x1a, 0x79, 0x1d, 0xe0, 0x69, 0xf9, 0xb3, 0xf6, 0x39, 0x05, 0x9f, 0x79, 0x98, 0x5d, 0x3b, 0xf9, + 0x7f, 0xe3, 0x09, 0x2e, 0x31, 0x35, 0xb8, 0xea, 0x54, 0x5a, 0xcc, 0xa5, 0x13, 0x27, 0x6c, 0x57, + 0xe4, 0x38, 0xf3, 0x93, 0x33, 0x14, 0x30, 0x36, 0x31, 0x96, 0xcf, 0xca, 0x4b, 0x3a, 0x41, 0xe4, + 0x57, 0xcf, 0xa4, 0x72, 0x97, 0x27, 0xca, 0x6d, 0x0e, 0xa1, 0xf6, 0x59, 0xd9, 0xca, 0xe8, 0xec, + 0x68, 0x32, 
0x8d, 0xef, 0x08, 0xcf, 0xeb, 0xf9, 0xc9, 0xf7, 0x61, 0xbc, 0x19, 0xcb, 0x90, 0x9c, + 0x2e, 0x43, 0xc9, 0x4e, 0x13, 0x9c, 0xcf, 0x9f, 0x65, 0x7e, 0xa3, 0xe5, 0xf7, 0x18, 0x4f, 0x07, + 0x02, 0xc2, 0xb8, 0x5a, 0x4c, 0xdf, 0xfc, 0x95, 0x89, 0x2d, 0xe8, 0x75, 0xd9, 0x73, 0x4a, 0x71, + 0xfa, 0x91, 0xe4, 0x3a, 0x99, 0x44, 0xe3, 0x5b, 0x11, 0x6b, 0x9d, 0x19, 0x0c, 0x57, 0xa4, 0xc2, + 0x26, 0x74, 0xc0, 0x15, 0x8c, 0xab, 0xad, 0xba, 0x71, 0x8a, 0x90, 0xc8, 0xba, 0xc6, 0xcb, 0x76, + 0x6b, 0x41, 0x39, 0x56, 0xf4, 0x5f, 0xce, 0x01, 0x03, 0xe3, 0x15, 0x2e, 0x0b, 0xd6, 0x91, 0x3b, + 0x1e, 0xb0, 0x28, 0xef, 0xc8, 0xd4, 0xfd, 0xe4, 0x76, 0xc9, 0x68, 0x5e, 0x0e, 0x61, 0xf6, 0x05, + 0x25, 0x5c, 0x1e, 0xdd, 0xc5, 0x8e, 0xae, 0x53, 0xbb, 0x8d, 0xcf, 0x8f, 0xd5, 0x73, 0xc4, 0x1a, + 0x2d, 0xe8, 0x6b, 0x34, 0xab, 0xad, 0x85, 0xbd, 0xbc, 0xb5, 0x6f, 0x16, 0xb6, 0xf7, 0xcd, 0xc2, + 0xce, 0xbe, 0x59, 0xf8, 0x38, 0x30, 0xd1, 0xd6, 0xc0, 0x44, 0xdb, 0x03, 0x13, 0xed, 0x0c, 0x4c, + 0xf4, 0x7b, 0x60, 0xa2, 0xaf, 0x7f, 0xcc, 0xc2, 0xeb, 0x19, 0x95, 0xc3, 0xbf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x5b, 0xcf, 0x13, 0x0c, 0x1b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Handler) + copy(dAtA[i:], m.Handler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) + i-- + dAtA[i] = 0x12 + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Handler) + n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + 
for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Handler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + 
m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto new file mode 100644 index 00000000..49166798 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto 
@@ -0,0 +1,108 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.node.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1; +} + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must conform to the DNS Label (RFC 1123) requirements, and is + // immutable. + optional string handler = 2; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 3; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 4; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated RuntimeClass items = 2; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map<string, string> nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; +} + diff --git a/vendor/k8s.io/api/node/v1beta1/register.go b/vendor/k8s.io/api/node/v1beta1/register.go new file mode 100644 index 00000000..3c3b61ba --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go new file mode 100644 index 00000000..793a48f6 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must conform to the DNS Label (RFC 1123) requirements, and is + // immutable. + Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. 
Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..681f73f2 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go similarity index 59% rename from vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go index 9f636b48..c3989528 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -18,62 +18,66 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Initializer) DeepCopyInto(out *Initializer) { +func (in *Overhead) DeepCopyInto(out *Overhead) { *out = *in - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]Rule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() } } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. -func (in *Initializer) DeepCopy() *Initializer { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { if in == nil { return nil } - out := new(Initializer) + out := new(Overhead) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) { +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = make([]Initializer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration. -func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { if in == nil { return nil } - out := new(InitializerConfiguration) + out := new(RuntimeClass) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { +func (in *RuntimeClass) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -81,13 +85,13 @@ func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) { +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]InitializerConfiguration, len(*in)) + *out = make([]RuntimeClass, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -95,18 +99,18 @@ func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurati return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList. -func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. 
+func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { if in == nil { return nil } - out := new(InitializerConfigurationList) + out := new(RuntimeClassList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -114,32 +118,31 @@ func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Rule) DeepCopyInto(out *Rule) { +func (in *Scheduling) DeepCopyInto(out *Scheduling) { *out = *in - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIVersions != nil { - in, out := &in.APIVersions, &out.APIVersions - *out = make([]string, len(*in)) - copy(*out, *in) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. -func (in *Rule) DeepCopy() *Rule { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { if in == nil { return nil } - out := new(Rule) + out := new(Scheduling) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 9c456f92..05d8332f 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -15,9 +15,10 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. -// +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index d7d62dd3..5b57f699 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -14,51 +14,29 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto - - It has these top-level messages: - AllowedFlexVolume - AllowedHostPath - Eviction - FSGroupStrategyOptions - HostPortRange - IDRange - PodDisruptionBudget - PodDisruptionBudgetList - PodDisruptionBudgetSpec - PodDisruptionBudgetStatus - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - RunAsUserStrategyOptions - SELinuxStrategyOptions - SupplementalGroupsStrategyOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -71,77 +49,540 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo 
+ +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo + +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{3} +} +func (m *Eviction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Eviction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Eviction.Merge(m, src) +} +func (m *Eviction) XXX_Size() int { + return m.Size() +} +func (m *Eviction) XXX_DiscardUnknown() { + xxx_messageInfo_Eviction.DiscardUnknown(m) +} + +var xxx_messageInfo_Eviction proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{4} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{5} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{1} } +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{6} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} -func (m *Eviction) Reset() { *m = Eviction{} } -func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_IDRange proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{7} +} +func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudget) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudget.Merge(m, src) +} +func (m *PodDisruptionBudget) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudget) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudget.DiscardUnknown(m) +} -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{8} +} +func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetList.Merge(m, src) +} +func (m *PodDisruptionBudgetList) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetList) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetList.DiscardUnknown(m) +} -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } 
+var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{9} +} +func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetSpec.Merge(m, src) +} +func (m *PodDisruptionBudgetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetSpec.DiscardUnknown(m) +} -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_014060e454a820dc, []int{10} +} +func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetStatus.Merge(m, src) +} +func (m *PodDisruptionBudgetStatus) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{11} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_014060e454a820dc, []int{12} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{13} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) } -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } +func (*RunAsGroupStrategyOptions) ProtoMessage() {} +func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{14} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + 
return fileDescriptor_014060e454a820dc, []int{15} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{16} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{17} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) } -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptor_014060e454a820dc, []int{18} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + 
} + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo func init() { + proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") @@ -152,17 +593,175 @@ func init() { proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") + proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_014060e454a820dc) +} + +var fileDescriptor_014060e454a820dc = []byte{ + // 1883 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x35, 0xd7, 0xd9, 0x00, + 0x85, 0x9b, 0x26, 0xcb, 0x58, 0x76, 0x5c, 0xa3, 0x69, 0x8b, 0x68, 0x45, 0xc9, 0x56, 0x60, 0x59, + 0xec, 0xd0, 0x0e, 0xda, 0xc2, 0x2d, 0x3a, 0xe4, 0x8e, 0xa8, 0x8d, 0x96, 0xbb, 0xdb, 0x99, 0x59, + 0x46, 0xbc, 0xeb, 0x45, 0x2f, 0x7a, 0xd9, 0x17, 0x08, 0xfa, 0x00, 0x45, 0xaf, 0xfa, 0x12, 0x0e, + 0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0x2c, 0xfa, 0x12, 0xbe, 0x0a, 0x76, 0x38, 0xbb, 0xe4, + 0xfe, 0x91, 0x76, 0x00, 0xfb, 0x8e, 0x3b, 0xe7, 0xfb, 0xbe, 0x33, 0x73, 0xe6, 0xcc, 0x99, 0xc3, + 0x01, 0xe6, 0xc5, 0x7d, 0x66, 0xd8, 0x5e, 0xed, 0x22, 0x68, 0x11, 0xea, 0x12, 0x4e, 0x58, 0xad, + 0x47, 0x5c, 0xcb, 0xa3, 0x35, 0x69, 0xc0, 0xbe, 0x5d, 0xf3, 0x3d, 0xc7, 0x6e, 0xf7, 0x6b, 0xbd, + 0xdb, 0x2d, 0xc2, 0xf1, 0xed, 0x5a, 0x87, 0xb8, 0x84, 0x62, 0x4e, 0x2c, 0xc3, 0xa7, 0x1e, 0xf7, + 0xe0, 0xf5, 0x11, 0xd4, 0xc0, 0xbe, 0x6d, 0x8c, 0xa0, 0x86, 0x84, 0xee, 0x7e, 0xd8, 0xb1, 0xf9, + 0x79, 0xd0, 0x32, 0xda, 0x5e, 0xb7, 0xd6, 0xf1, 0x3a, 
0x5e, 0x4d, 0x30, 0x5a, 0xc1, 0x99, 0xf8, + 0x12, 0x1f, 0xe2, 0xd7, 0x48, 0x69, 0x57, 0x9f, 0x70, 0xda, 0xf6, 0x28, 0xa9, 0xf5, 0x32, 0xde, + 0x76, 0xef, 0x8e, 0x31, 0x5d, 0xdc, 0x3e, 0xb7, 0x5d, 0x42, 0xfb, 0x35, 0xff, 0xa2, 0x13, 0x0e, + 0xb0, 0x5a, 0x97, 0x70, 0x9c, 0xc7, 0xaa, 0x15, 0xb1, 0x68, 0xe0, 0x72, 0xbb, 0x4b, 0x32, 0x84, + 0x7b, 0xb3, 0x08, 0xac, 0x7d, 0x4e, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0xb8, 0xed, 0xd4, + 0x6c, 0x97, 0x33, 0x4e, 0xd3, 0x24, 0xfd, 0x2e, 0xd8, 0xd8, 0x77, 0x1c, 0xef, 0x4b, 0x62, 0x1d, + 0x34, 0x8f, 0xeb, 0xd4, 0xee, 0x11, 0x0a, 0x6f, 0x82, 0x79, 0x17, 0x77, 0x89, 0xaa, 0xdc, 0x54, + 0x6e, 0x2d, 0x9b, 0xab, 0xcf, 0x07, 0xda, 0xdc, 0x70, 0xa0, 0xcd, 0x3f, 0xc6, 0x5d, 0x82, 0x84, + 0x45, 0xff, 0x04, 0x54, 0x24, 0xeb, 0xc8, 0x21, 0x97, 0x9f, 0x7b, 0x4e, 0xd0, 0x25, 0xf0, 0xc7, + 0x60, 0xd1, 0x12, 0x02, 0x92, 0xb8, 0x2e, 0x89, 0x8b, 0x23, 0x59, 0x24, 0xad, 0x3a, 0x03, 0x57, + 0x25, 0xf9, 0xa1, 0xc7, 0x78, 0x03, 0xf3, 0x73, 0xb8, 0x07, 0x80, 0x8f, 0xf9, 0x79, 0x83, 0x92, + 0x33, 0xfb, 0x52, 0xd2, 0xa1, 0xa4, 0x83, 0x46, 0x6c, 0x41, 0x13, 0x28, 0xf8, 0x01, 0x28, 0x53, + 0x82, 0xad, 0x53, 0xd7, 0xe9, 0xab, 0x57, 0x6e, 0x2a, 0xb7, 0xca, 0xe6, 0x86, 0x64, 0x94, 0x91, + 0x1c, 0x47, 0x31, 0x42, 0xff, 0x8f, 0x02, 0xca, 0x87, 0x3d, 0xbb, 0xcd, 0x6d, 0xcf, 0x85, 0x7f, + 0x04, 0xe5, 0x70, 0xb7, 0x2c, 0xcc, 0xb1, 0x70, 0xb6, 0xb2, 0xf7, 0x91, 0x31, 0xce, 0xa4, 0x38, + 0x78, 0x86, 0x7f, 0xd1, 0x09, 0x07, 0x98, 0x11, 0xa2, 0x8d, 0xde, 0x6d, 0xe3, 0xb4, 0xf5, 0x05, + 0x69, 0xf3, 0x13, 0xc2, 0xf1, 0x78, 0x7a, 0xe3, 0x31, 0x14, 0xab, 0x42, 0x07, 0xac, 0x59, 0xc4, + 0x21, 0x9c, 0x9c, 0xfa, 0xa1, 0x47, 0x26, 0x66, 0xb8, 0xb2, 0x77, 0xe7, 0xd5, 0xdc, 0xd4, 0x27, + 0xa9, 0x66, 0x65, 0x38, 0xd0, 0xd6, 0x12, 0x43, 0x28, 0x29, 0xae, 0x7f, 0xa5, 0x80, 0x9d, 0xa3, + 0xe6, 0x03, 0xea, 0x05, 0x7e, 0x93, 0x87, 0xbb, 0xdb, 0xe9, 0x4b, 0x13, 0xfc, 0x19, 0x98, 0xa7, + 0x81, 0x13, 0xed, 0xe5, 0x7b, 0xd1, 0x5e, 0xa2, 0xc0, 0x21, 0x2f, 0x07, 0xda, 0x66, 0x8a, 0xf5, + 0xa4, 0xef, 0x13, 0x24, 0x08, 0xf0, 0x33, 0xb0, 0x48, 0xb1, 0xdb, 0x21, 0xe1, 0xd4, 0x4b, 0xb7, + 0x56, 0xf6, 0x74, 0xa3, 0xf0, 0xac, 0x19, 0xc7, 0x75, 0x14, 0x42, 0xc7, 0x3b, 0x2e, 0x3e, 0x19, + 0x92, 0x0a, 0xfa, 0x09, 0x58, 0x13, 0x5b, 0xed, 0x51, 0x2e, 0x2c, 0xf0, 0x06, 0x28, 0x75, 0x6d, + 0x57, 0x4c, 0x6a, 0xc1, 0x5c, 0x91, 0xac, 0xd2, 0x89, 0xed, 0xa2, 0x70, 0x5c, 0x98, 0xf1, 0xa5, + 0x88, 0xd9, 0xa4, 0x19, 0x5f, 0xa2, 0x70, 0x5c, 0x7f, 0x00, 0x96, 0xa4, 0xc7, 0x49, 0xa1, 0xd2, + 0x74, 0xa1, 0x52, 0x8e, 0xd0, 0x3f, 0xae, 0x80, 0xcd, 0x86, 0x67, 0xd5, 0x6d, 0x46, 0x03, 0x11, + 0x2f, 0x33, 0xb0, 0x3a, 0x84, 0xbf, 0x85, 0xfc, 0x78, 0x02, 0xe6, 0x99, 0x4f, 0xda, 0x32, 0x2d, + 0xf6, 0xa6, 0xc4, 0x36, 0x67, 0x7e, 0x4d, 0x9f, 0xb4, 0xc7, 0xc7, 0x32, 0xfc, 0x42, 0x42, 0x0d, + 0x3e, 0x03, 0x8b, 0x8c, 0x63, 0x1e, 0x30, 0xb5, 0x24, 0x74, 0xef, 0xbe, 0xa6, 0xae, 0xe0, 0x8e, + 0x77, 0x71, 0xf4, 0x8d, 0xa4, 0xa6, 0xfe, 0x6f, 0x05, 0x5c, 0xcb, 0x61, 0x3d, 0xb2, 0x19, 0x87, + 0xcf, 0x32, 0x11, 0x33, 0x5e, 0x2d, 0x62, 0x21, 0x5b, 0xc4, 0x2b, 0x3e, 0xbc, 0xd1, 0xc8, 0x44, + 0xb4, 0x9a, 0x60, 0xc1, 0xe6, 0xa4, 0x1b, 0xa5, 0xa2, 0xf1, 0x7a, 0xcb, 0x32, 0xd7, 0xa4, 0xf4, + 0xc2, 0x71, 0x28, 0x82, 0x46, 0x5a, 0xfa, 0x37, 0x57, 0x72, 0x97, 0x13, 0x86, 0x13, 0x9e, 0x81, + 0xd5, 0xae, 0xed, 0xee, 0xf7, 0xb0, 0xed, 0xe0, 0x96, 0x3c, 0x3d, 0xd3, 0x92, 0x20, 0xac, 0xb0, + 0xc6, 0xa8, 0xc2, 0x1a, 0xc7, 0x2e, 0x3f, 0xa5, 0x4d, 0x4e, 0x6d, 0xb7, 0x63, 0x6e, 0x0c, 0x07, + 0xda, 0xea, 0xc9, 0x84, 0x12, 0x4a, 0xe8, 0xc2, 0xdf, 0x83, 0x32, 0x23, 0x0e, 
0x69, 0x73, 0x8f, + 0xbe, 0x5e, 0x85, 0x78, 0x84, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, + 0xb1, 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x75, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, + 0x1c, 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xff, 0x9f, 0x07, 0xd7, 0x0b, 0xb3, + 0x0a, 0x7e, 0x06, 0xa0, 0xd7, 0x62, 0x84, 0xf6, 0x88, 0xf5, 0x60, 0x74, 0x07, 0xd9, 0x5e, 0x74, + 0x70, 0x77, 0xe5, 0x06, 0xc1, 0xd3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0x5f, 0x14, 0xb0, 0x66, 0x8d, + 0xdc, 0x10, 0xab, 0xe1, 0x59, 0x51, 0x62, 0x3c, 0xf8, 0x21, 0xf9, 0x6e, 0xd4, 0x27, 0x95, 0x0e, + 0x5d, 0x4e, 0xfb, 0xe6, 0xb6, 0x9c, 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xe1, 0x09, 0x80, 0x56, + 0x2c, 0xc9, 0xe4, 0x9d, 0x26, 0x42, 0xbc, 0x60, 0xde, 0x90, 0x0a, 0xdb, 0x09, 0xbf, 0x11, 0x08, + 0xe5, 0x10, 0xe1, 0xaf, 0xc0, 0x7a, 0x3b, 0xa0, 0x94, 0xb8, 0xfc, 0x21, 0xc1, 0x0e, 0x3f, 0xef, + 0xab, 0xf3, 0x42, 0x6a, 0x47, 0x4a, 0xad, 0x1f, 0x24, 0xac, 0x28, 0x85, 0x0e, 0xf9, 0x16, 0x61, + 0x36, 0x25, 0x56, 0xc4, 0x5f, 0x48, 0xf2, 0xeb, 0x09, 0x2b, 0x4a, 0xa1, 0xe1, 0x7d, 0xb0, 0x4a, + 0x2e, 0x7d, 0xd2, 0x8e, 0x62, 0xba, 0x28, 0xd8, 0x5b, 0x92, 0xbd, 0x7a, 0x38, 0x61, 0x43, 0x09, + 0xe4, 0xae, 0x03, 0x60, 0x36, 0x88, 0x70, 0x03, 0x94, 0x2e, 0x48, 0x7f, 0x74, 0xf3, 0xa0, 0xf0, + 0x27, 0xfc, 0x14, 0x2c, 0xf4, 0xb0, 0x13, 0x10, 0x99, 0xeb, 0xef, 0xbf, 0x5a, 0xae, 0x3f, 0xb1, + 0xbb, 0x04, 0x8d, 0x88, 0x3f, 0xbf, 0x72, 0x5f, 0xd1, 0xbf, 0x56, 0x40, 0xa5, 0xe1, 0x59, 0x4d, + 0xd2, 0x0e, 0xa8, 0xcd, 0xfb, 0x0d, 0xb1, 0xcf, 0x6f, 0xa1, 0x66, 0xa3, 0x44, 0xcd, 0xfe, 0x68, + 0x7a, 0xae, 0x25, 0x67, 0x57, 0x54, 0xb1, 0xf5, 0xe7, 0x0a, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0x45, + 0xfd, 0x75, 0xb2, 0xa2, 0x7e, 0xf0, 0x3a, 0x8b, 0x29, 0xa8, 0xa7, 0x5f, 0x57, 0x72, 0x96, 0x22, + 0xaa, 0x69, 0xd8, 0xdd, 0x51, 0xbb, 0x67, 0x3b, 0xa4, 0x43, 0x2c, 0xb1, 0x98, 0xf2, 0x44, 0x77, + 0x17, 0x5b, 0xd0, 0x04, 0x0a, 0x32, 0xb0, 0x63, 0x91, 0x33, 0x1c, 0x38, 0x7c, 0xdf, 0xb2, 0x0e, + 0xb0, 0x8f, 0x5b, 0xb6, 0x63, 0x73, 0x5b, 0xb6, 0x23, 0xcb, 0xe6, 0x27, 0xc3, 0x81, 0xb6, 0x53, + 0xcf, 0x45, 0xbc, 0x1c, 0x68, 0x37, 0xb2, 0xdd, 0xbc, 0x11, 0x43, 0xfa, 0xa8, 0x40, 0x1a, 0xf6, + 0x81, 0x4a, 0xc9, 0x9f, 0x82, 0xf0, 0x50, 0xd4, 0xa9, 0xe7, 0x27, 0xdc, 0x96, 0x84, 0xdb, 0x5f, + 0x0e, 0x07, 0x9a, 0x8a, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x96, 0x7d, + 0xf8, 0xa4, 0xd7, 0x79, 0xe1, 0xf5, 0xfe, 0x70, 0xa0, 0x6d, 0xee, 0x67, 0xcd, 0xb3, 0x1d, 0xe6, + 0x89, 0xc2, 0x1a, 0x58, 0xea, 0x89, 0x96, 0x9d, 0xa9, 0x0b, 0x42, 0x7f, 0x7b, 0x38, 0xd0, 0x96, + 0x46, 0x5d, 0x7c, 0xa8, 0xb9, 0x78, 0xd4, 0x14, 0x8d, 0x60, 0x84, 0x82, 0x1f, 0x83, 0x95, 0x73, + 0x8f, 0xf1, 0xc7, 0x84, 0x7f, 0xe9, 0xd1, 0x0b, 0x51, 0x18, 0xca, 0xe6, 0xa6, 0xdc, 0xc1, 0x95, + 0x87, 0x63, 0x13, 0x9a, 0xc4, 0xc1, 0xdf, 0x82, 0xe5, 0x73, 0xd9, 0xf6, 0x31, 0x75, 0x49, 0x24, + 0xda, 0xad, 0x29, 0x89, 0x96, 0x68, 0x11, 0xcd, 0x8a, 0x94, 0x5f, 0x8e, 0x86, 0x19, 0x1a, 0xab, + 0xc1, 0x9f, 0x80, 0x25, 0xf1, 0x71, 0x5c, 0x57, 0xcb, 0x62, 0x36, 0x57, 0x25, 0x7c, 0xe9, 0xe1, + 0x68, 0x18, 0x45, 0xf6, 0x08, 0x7a, 0xdc, 0x38, 0x50, 0x97, 0xb3, 0xd0, 0xe3, 0xc6, 0x01, 0x8a, + 0xec, 0xf0, 0x19, 0x58, 0x62, 0xe4, 0x91, 0xed, 0x06, 0x97, 0x2a, 0x10, 0x47, 0xee, 0xf6, 0x94, + 0xe9, 0x36, 0x0f, 0x05, 0x32, 0xd5, 0x70, 0x8f, 0xd5, 0xa5, 0x1d, 0x45, 0x92, 0xd0, 0x02, 0xcb, + 0x34, 0x70, 0xf7, 0xd9, 0x53, 0x46, 0xa8, 0xba, 0x92, 0xb9, 0xed, 0xd3, 0xfa, 0x28, 0xc2, 0xa6, + 0x3d, 0xc4, 0x91, 0x89, 0x11, 0x68, 0x2c, 0x0c, 0x2d, 0x00, 0xc4, 0x87, 0xe8, 0xeb, 0xd5, 0x9d, + 0x99, 
0x7d, 0x20, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf1, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, + 0xab, 0x02, 0x20, 0x0b, 0x7c, 0xdf, 0x21, 0x5d, 0xe2, 0x72, 0xec, 0x88, 0x51, 0xa6, 0xae, 0x0a, + 0x77, 0xbf, 0x98, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0x6e, 0x06, 0xb2, 0x50, 0x94, 0xe3, 0x33, + 0xdc, 0xb4, 0x33, 0xb9, 0xda, 0xb5, 0x99, 0x9b, 0x96, 0xff, 0x2f, 0x69, 0xbc, 0x69, 0xd2, 0x8e, + 0x22, 0x49, 0xf8, 0x39, 0xd8, 0x89, 0xfe, 0x43, 0x22, 0xcf, 0xe3, 0x47, 0xb6, 0x43, 0x58, 0x9f, + 0x71, 0xd2, 0x55, 0xd7, 0x45, 0x32, 0x55, 0x25, 0x73, 0x07, 0xe5, 0xa2, 0x50, 0x01, 0x1b, 0x76, + 0x81, 0x16, 0x15, 0xa1, 0xf0, 0x84, 0xc6, 0x55, 0xf0, 0x90, 0xb5, 0xb1, 0x33, 0xea, 0x8d, 0xae, + 0x0a, 0x07, 0xef, 0x0d, 0x07, 0x9a, 0x56, 0x9f, 0x0e, 0x45, 0xb3, 0xb4, 0xe0, 0x6f, 0x80, 0x8a, + 0x8b, 0xfc, 0x6c, 0x08, 0x3f, 0x3f, 0x0a, 0x2b, 0x5b, 0xa1, 0x83, 0x42, 0x36, 0xf4, 0xc1, 0x06, + 0x4e, 0xfe, 0x9b, 0x67, 0x6a, 0x45, 0x9c, 0xf5, 0xf7, 0xa7, 0xec, 0x43, 0xea, 0x01, 0xc0, 0x54, + 0x65, 0x18, 0x37, 0x52, 0x06, 0x86, 0x32, 0xea, 0xf0, 0x12, 0x40, 0x9c, 0x7e, 0x7c, 0x60, 0x2a, + 0x9c, 0x79, 0x91, 0x65, 0x5e, 0x2c, 0xc6, 0xa9, 0x96, 0x31, 0x31, 0x94, 0xe3, 0x03, 0x72, 0x50, + 0xc1, 0xa9, 0xc7, 0x12, 0xa6, 0x5e, 0x13, 0x8e, 0x7f, 0x3a, 0xdb, 0x71, 0xcc, 0x31, 0xaf, 0x4b, + 0xbf, 0x95, 0xb4, 0x85, 0xa1, 0xac, 0x03, 0xf8, 0x08, 0x6c, 0xc9, 0xc1, 0xa7, 0x2e, 0xc3, 0x67, + 0xa4, 0xd9, 0x67, 0x6d, 0xee, 0x30, 0x75, 0x53, 0xd4, 0x6e, 0x75, 0x38, 0xd0, 0xb6, 0xf6, 0x73, + 0xec, 0x28, 0x97, 0x05, 0x3f, 0x05, 0x1b, 0x67, 0x1e, 0x6d, 0xd9, 0x96, 0x45, 0xdc, 0x48, 0x69, + 0x4b, 0x28, 0x6d, 0x85, 0xf1, 0x3f, 0x4a, 0xd9, 0x50, 0x06, 0x0d, 0x19, 0xd8, 0x96, 0xca, 0x0d, + 0xea, 0xb5, 0x4f, 0xbc, 0xc0, 0xe5, 0xe1, 0x75, 0xc1, 0xd4, 0xed, 0xf8, 0x8a, 0xdc, 0xde, 0xcf, + 0x03, 0xbc, 0x1c, 0x68, 0x37, 0x73, 0xae, 0xab, 0x04, 0x08, 0xe5, 0x6b, 0x43, 0x07, 0xac, 0xca, + 0xe7, 0xaf, 0x03, 0x07, 0x33, 0xa6, 0xaa, 0xe2, 0xa8, 0xdf, 0x9b, 0x5e, 0xd8, 0x62, 0x78, 0xfa, + 0xbc, 0x8b, 0xff, 0x65, 0x93, 0x00, 0x94, 0x50, 0xd7, 0xff, 0xae, 0x80, 0xeb, 0x85, 0x85, 0x11, + 0xde, 0x4b, 0xbc, 0xa9, 0xe8, 0xa9, 0x37, 0x15, 0x98, 0x25, 0xbe, 0x81, 0x27, 0x95, 0xaf, 0x14, + 0xa0, 0x16, 0xdd, 0x10, 0xf0, 0xe3, 0xc4, 0x04, 0xdf, 0x4d, 0x4d, 0xb0, 0x92, 0xe1, 0xbd, 0x81, + 0xf9, 0x7d, 0xa3, 0x80, 0x77, 0xa6, 0xec, 0x40, 0x5c, 0x90, 0x88, 0x35, 0x89, 0x7a, 0x8c, 0xc3, + 0xa3, 0xac, 0x88, 0x3c, 0x1a, 0x17, 0xa4, 0x1c, 0x0c, 0x2a, 0x64, 0xc3, 0xa7, 0xe0, 0x9a, 0xac, + 0x86, 0x69, 0x9b, 0xe8, 0xdc, 0x97, 0xcd, 0x77, 0x86, 0x03, 0xed, 0x5a, 0x3d, 0x1f, 0x82, 0x8a, + 0xb8, 0xfa, 0x3f, 0x15, 0xb0, 0x93, 0x7f, 0xe5, 0xc3, 0x3b, 0x89, 0x70, 0x6b, 0xa9, 0x70, 0x5f, + 0x4d, 0xb1, 0x64, 0xb0, 0xff, 0x00, 0xd6, 0x65, 0x63, 0x90, 0x7c, 0x22, 0x4c, 0x04, 0x3d, 0x3c, + 0x22, 0x61, 0x4f, 0x2f, 0x25, 0xa2, 0xf4, 0x15, 0xff, 0xc6, 0x93, 0x63, 0x28, 0xa5, 0xa6, 0xff, + 0x4b, 0x01, 0xef, 0xce, 0xbc, 0x6c, 0xa1, 0x99, 0x98, 0xba, 0x91, 0x9a, 0x7a, 0xb5, 0x58, 0xe0, + 0xcd, 0xbc, 0x14, 0x9a, 0x1f, 0x3e, 0x7f, 0x51, 0x9d, 0xfb, 0xf6, 0x45, 0x75, 0xee, 0xbb, 0x17, + 0xd5, 0xb9, 0x3f, 0x0f, 0xab, 0xca, 0xf3, 0x61, 0x55, 0xf9, 0x76, 0x58, 0x55, 0xbe, 0x1b, 0x56, + 0x95, 0xff, 0x0e, 0xab, 0xca, 0xdf, 0xfe, 0x57, 0x9d, 0xfb, 0xdd, 0x92, 0x94, 0xfb, 0x3e, 0x00, + 0x00, 0xff, 0xff, 0x48, 0x23, 0x7b, 0x0e, 0x44, 0x18, 0x00, 0x00, +} + +func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedCSIDriver) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -170,21 +769,27 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { } func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -192,29 +797,35 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { } func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -222,35 +833,44 @@ func (m *Eviction) Marshal() (dAtA []byte, err error) { } func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size())) - n2, err := m.DeleteOptions.MarshalTo(dAtA[i:]) + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -258,33 +878,41 @@ func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil -} + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -292,23 +920,28 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { } func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -316,23 +949,28 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -340,41 +978,52 @@ func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -382,37 +1031,46 @@ func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -420,47 +1078,58 @@ func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MinAvailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinAvailable.Size())) - n7, err := m.MinAvailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i 
+= n7 + i-- + dAtA[i] = 0x1a } if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x12 } - if m.MaxUnavailable != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MinAvailable != nil { + { + size, err := m.MinAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -468,63 +1137,66 @@ func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) + i-- + dAtA[i] = 0x18 if len(m.DisruptedPods) > 0 { keysForDisruptedPods := make([]string, 0, len(m.DisruptedPods)) for k := range m.DisruptedPods { keysForDisruptedPods = append(keysForDisruptedPods, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - for _, k := range keysForDisruptedPods { + for iNdEx := len(keysForDisruptedPods) - 1; iNdEx >= 0; iNdEx-- { + v := m.DisruptedPods[string(keysForDisruptedPods[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - v := m.DisruptedPods[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForDisruptedPods[iNdEx]) + copy(dAtA[i:], keysForDisruptedPods[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDisruptedPods[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 } } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) - dAtA[i] = 0x28 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -532,33 +1204,42 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -566,37 +1247,46 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n13, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -604,296 +1294,406 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) 
- i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + i-- + if m.ReadOnlyRootFilesystem { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x40 - i++ - if m.HostPID { + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- dAtA[i] = 0x48 - i++ - if m.HostIPC { + i-- + if m.HostPID { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n14, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n15, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n16, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n17, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - i += n17 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { + i-- + if m.HostNetwork { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a } - i++ } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedCapabilities) 
> 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 } - i++ } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 } } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsUserStrategyOptions) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -901,31 +1701,39 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n18, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n18 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -933,57 +1741,63 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base +} +func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n } + func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Driver) @@ -992,6 +1806,9 @@ func (m *AllowedFlexVolume) Size() (n int) { } func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.PathPrefix) @@ -1001,6 +1818,9 @@ func (m *AllowedHostPath) Size() (n int) { } func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1013,6 +1833,9 @@ func (m *Eviction) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1027,6 +1850,9 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1035,6 +1861,9 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1043,6 +1872,9 @@ func (m *IDRange) Size() (n int) { 
} func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1055,6 +1887,9 @@ func (m *PodDisruptionBudget) Size() (n int) { } func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1069,6 +1904,9 @@ func (m *PodDisruptionBudgetList) Size() (n int) { } func (m *PodDisruptionBudgetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MinAvailable != nil { @@ -1087,6 +1925,9 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { } func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1107,6 +1948,9 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1117,6 +1961,9 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1131,6 +1978,9 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1212,10 +2062,44 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedCSIDrivers) > 0 { + for _, e := range m.AllowedCSIDrivers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1229,7 +2113,29 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1242,6 +2148,9 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1256,18 +2165,21 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AllowedCSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedCSIDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *AllowedFlexVolume) String() 
string { if this == nil { return "nil" @@ -1294,8 +2206,8 @@ func (this *Eviction) String() string { return "nil" } s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, `}`, }, "") return s @@ -1304,9 +2216,14 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1338,7 +2255,7 @@ func (this *PodDisruptionBudget) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1349,9 +2266,14 @@ func (this *PodDisruptionBudgetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1361,9 +2283,9 @@ func (this *PodDisruptionBudgetSpec) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", 
this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1377,7 +2299,7 @@ func (this *PodDisruptionBudgetStatus) String() string { keysForDisruptedPods = append(keysForDisruptedPods, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" + mapStringForDisruptedPods := "map[string]v1.Time{" for _, k := range keysForDisruptedPods { mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } @@ -1398,7 +2320,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1408,9 +2330,14 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1419,6 +2346,26 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += 
strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -1426,7 +2373,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -1436,11 +2383,30 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1449,9 +2415,25 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + 
repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, `}`, }, "") return s @@ -1462,7 +2444,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -1471,9 +2453,14 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1486,6 +2473,91 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1501,7 
+2573,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1529,7 +2601,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1539,6 +2611,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1553,6 +2628,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1580,7 +2658,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1608,7 +2686,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1618,6 +2696,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1637,7 +2718,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1652,6 +2733,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1679,7 +2763,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1707,7 +2791,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1716,6 +2800,9 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1737,7 +2824,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1746,11 +2833,14 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} + m.DeleteOptions = &v1.DeleteOptions{} } if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1765,6 +2855,9 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF 
} @@ -1792,7 +2885,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1820,7 +2913,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,6 +2923,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1849,7 +2945,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1858,6 +2954,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1875,6 +2974,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1902,7 +3004,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1930,7 +3032,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + m.Min |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1949,7 +3051,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + m.Max |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1963,6 +3065,9 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1990,7 +3095,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2018,7 +3123,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.Min |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2037,7 +3142,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Max |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2048,7 +3153,10 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -2078,7 +3186,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2106,7 +3214,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2115,6 +3223,9 @@ func (m *PodDisruptionBudget) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2136,7 +3247,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2145,6 +3256,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2166,7 +3280,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2175,6 +3289,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2191,6 +3308,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2218,7 +3338,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2246,7 +3366,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2255,6 +3375,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2276,7 +3399,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2285,6 +3408,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2302,6 +3428,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2329,7 +3458,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2357,7 +3486,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2366,11 +3495,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MinAvailable == nil { - m.MinAvailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MinAvailable 
= &intstr.IntOrString{} } if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2390,7 +3522,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2399,11 +3531,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2423,7 +3558,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2432,11 +3567,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2451,6 +3589,9 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2478,7 +3619,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2506,7 +3647,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2525,7 +3666,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2534,54 +3675,20 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DisruptedPods == nil { - 
m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) + m.DisruptedPods = make(map[string]v1.Time) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &v1.Time{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2591,46 +3698,88 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DisruptedPods[mapkey] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time - m.DisruptedPods[mapkey] = mapvalue } + m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex case 3: if wireType != 0 { @@ -2646,7 +3795,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift + m.PodDisruptionsAllowed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2665,7 +3814,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentHealthy |= (int32(b) & 0x7F) << shift + m.CurrentHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2684,7 +3833,7 @@ func (m 
*PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredHealthy |= (int32(b) & 0x7F) << shift + m.DesiredHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2703,7 +3852,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExpectedPods |= (int32(b) & 0x7F) << shift + m.ExpectedPods |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2717,6 +3866,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2744,7 +3896,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2772,7 +3924,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2781,6 +3933,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2802,7 +3957,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2811,6 +3966,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2827,6 +3985,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2854,7 +4015,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2882,7 +4043,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2891,6 +4052,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2912,7 +4076,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2921,6 +4085,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2938,6 +4105,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2965,7 +4135,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire 
|= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2993,7 +4163,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3013,7 +4183,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3023,6 +4193,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3042,7 +4215,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3052,6 +4225,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3071,7 +4247,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3081,6 +4257,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3100,7 +4279,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3110,6 +4289,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3129,7 +4311,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3149,7 +4331,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3158,6 +4340,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3180,7 +4365,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3200,7 +4385,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3220,7 +4405,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3229,6 +4414,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3250,7 +4438,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3259,6 +4447,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3280,7 +4471,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3289,6 +4480,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3310,7 +4504,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3319,6 +4513,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3340,7 +4537,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3360,7 +4557,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3371,7 +4568,160 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) } - var v int + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) + if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3381,16 +4731,27 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3402,7 +4763,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3411,17 +4772,22 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3433,7 +4799,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3442,19 +4808,22 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3464,24 +4833,84 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 20: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
RunAsGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3493,7 +4922,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3503,16 +4932,19 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 21: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3522,20 +4954,25 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3546,6 +4983,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3573,7 +5013,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3601,7 +5041,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3611,6 +5051,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3630,7 +5073,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3639,6 +5082,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3656,6 +5102,127 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3683,7 +5250,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3711,7 +5278,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3721,6 +5288,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3740,7 +5310,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3749,11 +5319,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3768,6 +5341,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3795,7 +5371,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3823,7 +5399,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3833,6 +5409,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3852,7 +5431,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3861,6 +5440,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3878,6 +5460,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3944,10 +5529,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3976,6 +5564,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3994,119 +5585,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1715 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x9a, 0x92, 0x48, 0x8d, 0x24, 0x5a, 
0x1a, 0xfd, 0xe9, 0x46, 0xa8, 0xb9, 0x0e, 0x03, - 0x14, 0x6e, 0x90, 0x2c, 0x63, 0x39, 0x69, 0x8d, 0xa6, 0x2d, 0xa2, 0x35, 0x25, 0x5b, 0x81, 0x55, - 0xb1, 0x43, 0x3b, 0x68, 0x0b, 0xb7, 0xe8, 0x70, 0x77, 0x44, 0x4e, 0xb4, 0xdc, 0xdd, 0xce, 0xcc, - 0x32, 0xe4, 0xad, 0x87, 0x1e, 0x7a, 0xec, 0x17, 0xc8, 0x27, 0x28, 0x7a, 0xea, 0x97, 0x50, 0x81, - 0xa2, 0xc8, 0x31, 0xe8, 0x81, 0xa8, 0x59, 0xf4, 0x4b, 0xf8, 0xd2, 0x60, 0x87, 0xb3, 0x24, 0xf7, - 0x0f, 0x29, 0x2b, 0x40, 0x7c, 0xdb, 0x9d, 0xf7, 0xfb, 0xfd, 0xde, 0x9b, 0x37, 0x6f, 0xde, 0xce, - 0x0e, 0xb0, 0x2e, 0x1f, 0x72, 0x93, 0xfa, 0xb5, 0xcb, 0xb0, 0x45, 0x98, 0x47, 0x04, 0xe1, 0xb5, - 0x1e, 0xf1, 0x1c, 0x9f, 0xd5, 0x94, 0x01, 0x07, 0xb4, 0x16, 0xf8, 0x2e, 0xb5, 0x07, 0xb5, 0xde, - 0xfd, 0x16, 0x11, 0xf8, 0x7e, 0xad, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x19, 0x30, 0x5f, 0xf8, - 0xf0, 0xad, 0x31, 0xd4, 0xc4, 0x01, 0x35, 0xc7, 0x50, 0x53, 0x41, 0x0f, 0xde, 0x6f, 0x53, 0xd1, - 0x09, 0x5b, 0xa6, 0xed, 0x77, 0x6b, 0x6d, 0xbf, 0xed, 0xd7, 0x24, 0xa3, 0x15, 0x5e, 0xc8, 0x37, - 0xf9, 0x22, 0x9f, 0xc6, 0x4a, 0x07, 0xd5, 0x19, 0xa7, 0xb6, 0xcf, 0x48, 0xad, 0x97, 0xf1, 0x76, - 0xf0, 0xe1, 0x14, 0xd3, 0xc5, 0x76, 0x87, 0x7a, 0x84, 0x0d, 0x6a, 0xc1, 0x65, 0x3b, 0x1a, 0xe0, - 0xb5, 0x2e, 0x11, 0x38, 0x8f, 0x55, 0x9b, 0xc7, 0x62, 0xa1, 0x27, 0x68, 0x97, 0x64, 0x08, 0x3f, - 0xba, 0x8e, 0xc0, 0xed, 0x0e, 0xe9, 0xe2, 0x0c, 0xef, 0xc1, 0x3c, 0x5e, 0x28, 0xa8, 0x5b, 0xa3, - 0x9e, 0xe0, 0x82, 0xa5, 0x49, 0xd5, 0x8f, 0xc1, 0xf6, 0x91, 0xeb, 0xfa, 0x5f, 0x10, 0xe7, 0xc4, - 0x25, 0xfd, 0xcf, 0x7c, 0x37, 0xec, 0x12, 0xf8, 0x03, 0xb0, 0xea, 0x30, 0xda, 0x23, 0x4c, 0xd7, - 0xee, 0x6a, 0xf7, 0xd6, 0xac, 0xf2, 0xd5, 0xd0, 0x58, 0x1a, 0x0d, 0x8d, 0xd5, 0xba, 0x1c, 0x45, - 0xca, 0x5a, 0xe5, 0xe0, 0xb6, 0x22, 0x3f, 0xf1, 0xb9, 0x68, 0x60, 0xd1, 0x81, 0x87, 0x00, 0x04, - 0x58, 0x74, 0x1a, 0x8c, 0x5c, 0xd0, 0xbe, 0xa2, 0x43, 0x45, 0x07, 0x8d, 0x89, 0x05, 0xcd, 0xa0, - 0xe0, 0x7b, 0xa0, 0xc4, 0x08, 0x76, 0xce, 0x3d, 0x77, 0xa0, 0xdf, 0xba, 0xab, 0xdd, 0x2b, 0x59, - 0x5b, 0x8a, 0x51, 0x42, 0x6a, 0x1c, 0x4d, 0x10, 0xd5, 0x7f, 0x6b, 0xa0, 0x74, 0xdc, 0xa3, 0xb6, - 0xa0, 0xbe, 0x07, 0x7f, 0x0f, 0x4a, 0x51, 0xde, 0x1d, 0x2c, 0xb0, 0x74, 0xb6, 0x7e, 0xf8, 0x81, - 0x39, 0xad, 0x89, 0x49, 0x1a, 0xcc, 0xe0, 0xb2, 0x1d, 0x0d, 0x70, 0x33, 0x42, 0x9b, 0xbd, 0xfb, - 0xe6, 0x79, 0xeb, 0x73, 0x62, 0x8b, 0x33, 0x22, 0xf0, 0x34, 0xbc, 0xe9, 0x18, 0x9a, 0xa8, 0x42, - 0x17, 0x6c, 0x3a, 0xc4, 0x25, 0x82, 0x9c, 0x07, 0x91, 0x47, 0x2e, 0x23, 0x5c, 0x3f, 0x7c, 0xf0, - 0x7a, 0x6e, 0xea, 0xb3, 0x54, 0x6b, 0x7b, 0x34, 0x34, 0x36, 0x13, 0x43, 0x28, 0x29, 0x5e, 0xfd, - 0x52, 0x03, 0xfb, 0x27, 0xcd, 0xc7, 0xcc, 0x0f, 0x83, 0xa6, 0x88, 0xd6, 0xa9, 0x3d, 0x50, 0x26, - 0xf8, 0x63, 0xb0, 0xcc, 0x42, 0x97, 0xa8, 0x9c, 0xbe, 0xa3, 0x82, 0x5e, 0x46, 0xa1, 0x4b, 0x5e, - 0x0d, 0x8d, 0x9d, 0x14, 0xeb, 0xd9, 0x20, 0x20, 0x48, 0x12, 0xe0, 0xa7, 0x60, 0x95, 0x61, 0xaf, - 0x4d, 0xa2, 0xd0, 0x0b, 0xf7, 0xd6, 0x0f, 0xab, 0xe6, 0xdc, 0x5d, 0x63, 0x9e, 0xd6, 0x51, 0x04, - 0x9d, 0xae, 0xb8, 0x7c, 0xe5, 0x48, 0x29, 0x54, 0xcf, 0xc0, 0xa6, 0x5c, 0x6a, 0x9f, 0x09, 0x69, - 0x81, 0x77, 0x40, 0xa1, 0x4b, 0x3d, 0x19, 0xd4, 0x8a, 0xb5, 0xae, 0x58, 0x85, 0x33, 0xea, 0xa1, - 0x68, 0x5c, 0x9a, 0x71, 0x5f, 0xe6, 0x6c, 0xd6, 0x8c, 0xfb, 0x28, 0x1a, 0xaf, 0x3e, 0x06, 0x45, - 0xe5, 0x71, 0x56, 0xa8, 0xb0, 0x58, 0xa8, 0x90, 0x23, 0xf4, 0xd7, 0x5b, 0x60, 0xa7, 0xe1, 0x3b, - 0x75, 0xca, 0x59, 0x28, 0xf3, 0x65, 0x85, 0x4e, 0x9b, 0x88, 0x37, 0x50, 0x1f, 0xcf, 0xc0, 0x32, - 0x0f, 0x88, 0xad, 0xca, 0xe2, 0x70, 0x41, 0x6e, 0x73, 0xe2, 0x6b, 0x06, 
0xc4, 0xb6, 0x36, 0xe2, - 0xa5, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x2f, 0xc0, 0x2a, 0x17, 0x58, 0x84, 0x5c, 0x2f, 0x48, 0xdd, - 0x0f, 0x6f, 0xa8, 0x2b, 0xb9, 0xd3, 0x55, 0x1c, 0xbf, 0x23, 0xa5, 0x59, 0xfd, 0xa7, 0x06, 0xbe, - 0x97, 0xc3, 0x7a, 0x4a, 0xb9, 0x80, 0x2f, 0x32, 0x19, 0x33, 0x5f, 0x2f, 0x63, 0x11, 0x5b, 0xe6, - 0x6b, 0xb2, 0x79, 0xe3, 0x91, 0x99, 0x6c, 0x35, 0xc1, 0x0a, 0x15, 0xa4, 0x1b, 0x97, 0xa2, 0x79, - 0xb3, 0x69, 0x59, 0x9b, 0x4a, 0x7a, 0xe5, 0x34, 0x12, 0x41, 0x63, 0xad, 0xea, 0xbf, 0x6e, 0xe5, - 0x4e, 0x27, 0x4a, 0x27, 0xbc, 0x00, 0x1b, 0x5d, 0xea, 0x1d, 0xf5, 0x30, 0x75, 0x71, 0x4b, 0xed, - 0x9e, 0x45, 0x45, 0x10, 0xf5, 0x4a, 0x73, 0xdc, 0x2b, 0xcd, 0x53, 0x4f, 0x9c, 0xb3, 0xa6, 0x60, - 0xd4, 0x6b, 0x5b, 0x5b, 0xa3, 0xa1, 0xb1, 0x71, 0x36, 0xa3, 0x84, 0x12, 0xba, 0xf0, 0xb7, 0xa0, - 0xc4, 0x89, 0x4b, 0x6c, 0xe1, 0xb3, 0x9b, 0x75, 0x88, 0xa7, 0xb8, 0x45, 0xdc, 0xa6, 0xa2, 0x5a, - 0x1b, 0x51, 0xde, 0xe2, 0x37, 0x34, 0x91, 0x84, 0x2e, 0x28, 0x77, 0x71, 0xff, 0xb9, 0x87, 0x27, - 0x13, 0x29, 0x7c, 0xcb, 0x89, 0xc0, 0xd1, 0xd0, 0x28, 0x9f, 0x25, 0xb4, 0x50, 0x4a, 0xbb, 0xfa, - 0xbf, 0x65, 0xf0, 0xd6, 0xdc, 0xaa, 0x82, 0x9f, 0x02, 0xe8, 0xb7, 0x38, 0x61, 0x3d, 0xe2, 0x3c, - 0x1e, 0x7f, 0x4d, 0xa8, 0x1f, 0x6f, 0xdc, 0x03, 0xb5, 0x40, 0xf0, 0x3c, 0x83, 0x40, 0x39, 0x2c, - 0xf8, 0x27, 0x0d, 0x6c, 0x3a, 0x63, 0x37, 0xc4, 0x69, 0xf8, 0x4e, 0x5c, 0x18, 0x8f, 0xbf, 0x4d, - 0xbd, 0x9b, 0xf5, 0x59, 0xa5, 0x63, 0x4f, 0xb0, 0x81, 0xb5, 0xa7, 0x02, 0xda, 0x4c, 0xd8, 0x50, - 0xd2, 0x29, 0x3c, 0x03, 0xd0, 0x99, 0x48, 0x72, 0xf5, 0x4d, 0x93, 0x29, 0x5e, 0xb1, 0xee, 0x28, - 0x85, 0xbd, 0x84, 0xdf, 0x18, 0x84, 0x72, 0x88, 0xf0, 0xe7, 0xa0, 0x6c, 0x87, 0x8c, 0x11, 0x4f, - 0x3c, 0x21, 0xd8, 0x15, 0x9d, 0x81, 0xbe, 0x2c, 0xa5, 0xf6, 0x95, 0x54, 0xf9, 0x51, 0xc2, 0x8a, - 0x52, 0xe8, 0x88, 0xef, 0x10, 0x4e, 0x19, 0x71, 0x62, 0xfe, 0x4a, 0x92, 0x5f, 0x4f, 0x58, 0x51, - 0x0a, 0x0d, 0x1f, 0x82, 0x0d, 0xd2, 0x0f, 0x88, 0x1d, 0xe7, 0x74, 0x55, 0xb2, 0x77, 0x15, 0x7b, - 0xe3, 0x78, 0xc6, 0x86, 0x12, 0xc8, 0x03, 0x17, 0xc0, 0x6c, 0x12, 0xe1, 0x16, 0x28, 0x5c, 0x92, - 0xc1, 0xf8, 0xcb, 0x83, 0xa2, 0x47, 0xf8, 0x09, 0x58, 0xe9, 0x61, 0x37, 0x24, 0xaa, 0xd6, 0xdf, - 0x7d, 0xbd, 0x5a, 0x7f, 0x46, 0xbb, 0x04, 0x8d, 0x89, 0x3f, 0xb9, 0xf5, 0x50, 0xab, 0xfe, 0x43, - 0x03, 0xdb, 0x0d, 0xdf, 0x69, 0x12, 0x3b, 0x64, 0x54, 0x0c, 0x1a, 0x72, 0x9d, 0xdf, 0x40, 0xcf, - 0x46, 0x89, 0x9e, 0xfd, 0xc1, 0xe2, 0x5a, 0x4b, 0x46, 0x37, 0xaf, 0x63, 0x57, 0xaf, 0x34, 0xb0, - 0x97, 0x41, 0xbf, 0x81, 0x8e, 0xfa, 0xcb, 0x64, 0x47, 0x7d, 0xef, 0x26, 0x93, 0x99, 0xd3, 0x4f, - 0xff, 0x5f, 0xce, 0x99, 0x8a, 0xec, 0xa6, 0xd1, 0xe9, 0x8e, 0xd1, 0x1e, 0x75, 0x49, 0x9b, 0x38, - 0x72, 0x32, 0xa5, 0x99, 0xd3, 0xdd, 0xc4, 0x82, 0x66, 0x50, 0x90, 0x83, 0x7d, 0x87, 0x5c, 0xe0, - 0xd0, 0x15, 0x47, 0x8e, 0xf3, 0x08, 0x07, 0xb8, 0x45, 0x5d, 0x2a, 0xa8, 0x3a, 0x8e, 0xac, 0x59, - 0x1f, 0x8f, 0x86, 0xc6, 0x7e, 0x3d, 0x17, 0xf1, 0x6a, 0x68, 0xdc, 0xc9, 0x9e, 0xcb, 0xcd, 0x09, - 0x64, 0x80, 0xe6, 0x48, 0xc3, 0x01, 0xd0, 0x19, 0xf9, 0x43, 0x18, 0x6d, 0x8a, 0x3a, 0xf3, 0x83, - 0x84, 0xdb, 0x82, 0x74, 0xfb, 0xb3, 0xd1, 0xd0, 0xd0, 0xd1, 0x1c, 0xcc, 0xf5, 0x8e, 0xe7, 0xca, - 0xc3, 0xcf, 0xc1, 0x0e, 0x1e, 0xf7, 0x81, 0x84, 0xd7, 0x65, 0xe9, 0xf5, 0xe1, 0x68, 0x68, 0xec, - 0x1c, 0x65, 0xcd, 0xd7, 0x3b, 0xcc, 0x13, 0x85, 0x35, 0x50, 0xec, 0xc9, 0x23, 0x3b, 0xd7, 0x57, - 0xa4, 0xfe, 0xde, 0x68, 0x68, 0x14, 0xc7, 0xa7, 0xf8, 0x48, 0x73, 0xf5, 0xa4, 0x29, 0x0f, 0x82, - 0x31, 0x0a, 0x7e, 0x04, 0xd6, 0x3b, 0x3e, 0x17, 0xbf, 0x20, 0xe2, 0x0b, 0x9f, 0x5d, 0xca, 0xc6, - 
0x50, 0xb2, 0x76, 0xd4, 0x0a, 0xae, 0x3f, 0x99, 0x9a, 0xd0, 0x2c, 0x0e, 0xfe, 0x1a, 0xac, 0x75, - 0xd4, 0xb1, 0x8f, 0xeb, 0x45, 0x59, 0x68, 0xf7, 0x16, 0x14, 0x5a, 0xe2, 0x88, 0x68, 0x6d, 0x2b, - 0xf9, 0xb5, 0x78, 0x98, 0xa3, 0xa9, 0x1a, 0xfc, 0x21, 0x28, 0xca, 0x97, 0xd3, 0xba, 0x5e, 0x92, - 0xd1, 0xdc, 0x56, 0xf0, 0xe2, 0x93, 0xf1, 0x30, 0x8a, 0xed, 0x31, 0xf4, 0xb4, 0xf1, 0x48, 0x5f, - 0xcb, 0x42, 0x4f, 0x1b, 0x8f, 0x50, 0x6c, 0x87, 0x2f, 0x40, 0x91, 0x93, 0xa7, 0xd4, 0x0b, 0xfb, - 0x3a, 0x90, 0x5b, 0xee, 0xfe, 0x82, 0x70, 0x9b, 0xc7, 0x12, 0x99, 0x3a, 0x70, 0x4f, 0xd5, 0x95, - 0x1d, 0xc5, 0x92, 0xd0, 0x01, 0x6b, 0x2c, 0xf4, 0x8e, 0xf8, 0x73, 0x4e, 0x98, 0xbe, 0x9e, 0xf9, - 0xda, 0xa7, 0xf5, 0x51, 0x8c, 0x4d, 0x7b, 0x98, 0x64, 0x66, 0x82, 0x40, 0x53, 0x61, 0xf8, 0x67, - 0x0d, 0x40, 0x1e, 0x06, 0x81, 0x4b, 0xba, 0xc4, 0x13, 0xd8, 0x95, 0xe7, 0x7b, 0xae, 0x6f, 0x48, - 0x7f, 0x3f, 0x5d, 0x34, 0x9f, 0x0c, 0x29, 0xed, 0x78, 0xf2, 0x99, 0xce, 0x42, 0x51, 0x8e, 0xcf, - 0x28, 0x9d, 0x17, 0x5c, 0x3e, 0xeb, 0x9b, 0xd7, 0xa6, 0x33, 0xff, 0xff, 0x65, 0x9a, 0x4e, 0x65, - 0x47, 0xb1, 0x24, 0xfc, 0x0c, 0xec, 0xc7, 0x7f, 0x77, 0xc8, 0xf7, 0xc5, 0x09, 0x75, 0x09, 0x1f, - 0x70, 0x41, 0xba, 0x7a, 0x59, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x51, 0x2e, 0x0a, 0xcd, 0x61, 0xc3, - 0x2e, 0x30, 0xe2, 0xf6, 0x10, 0xed, 0x9d, 0x49, 0x7f, 0x3a, 0xe6, 0x36, 0x76, 0xc7, 0xa7, 0x96, - 0xdb, 0xd2, 0xc1, 0x3b, 0xa3, 0xa1, 0x61, 0xd4, 0x17, 0x43, 0xd1, 0x75, 0x5a, 0xf0, 0x57, 0x40, - 0xc7, 0xf3, 0xfc, 0x6c, 0x49, 0x3f, 0xdf, 0x8f, 0x7a, 0xce, 0x5c, 0x07, 0x73, 0xd9, 0x30, 0x00, - 0x5b, 0x38, 0xf9, 0x9f, 0xcd, 0xf5, 0x6d, 0xb9, 0x0b, 0xdf, 0x5d, 0xb0, 0x0e, 0xa9, 0x5f, 0x73, - 0x4b, 0x57, 0x69, 0xdc, 0x4a, 0x19, 0x38, 0xca, 0xa8, 0xc3, 0x3e, 0x80, 0x38, 0x7d, 0x2d, 0xc0, - 0x75, 0x78, 0xed, 0x27, 0x26, 0x73, 0x97, 0x30, 0x2d, 0xb5, 0x8c, 0x89, 0xa3, 0x1c, 0x1f, 0xf0, - 0x29, 0xd8, 0x55, 0xa3, 0xcf, 0x3d, 0x8e, 0x2f, 0x48, 0x73, 0xc0, 0x6d, 0xe1, 0x72, 0x7d, 0x47, - 0xf6, 0x37, 0x7d, 0x34, 0x34, 0x76, 0x8f, 0x72, 0xec, 0x28, 0x97, 0x05, 0x3f, 0x01, 0x5b, 0x17, - 0x3e, 0x6b, 0x51, 0xc7, 0x21, 0x5e, 0xac, 0xb4, 0x2b, 0x95, 0x76, 0xa3, 0x4c, 0x9c, 0xa4, 0x6c, - 0x28, 0x83, 0x86, 0x1c, 0xec, 0x29, 0xe5, 0x06, 0xf3, 0xed, 0x33, 0x3f, 0xf4, 0x44, 0xd4, 0x52, - 0xb9, 0xbe, 0x37, 0xf9, 0x8c, 0xec, 0x1d, 0xe5, 0x01, 0x5e, 0x0d, 0x8d, 0xbb, 0x39, 0x2d, 0x3d, - 0x01, 0x42, 0xf9, 0xda, 0xd5, 0x2f, 0x35, 0xa0, 0xcf, 0xeb, 0x1a, 0xf0, 0xa3, 0xc4, 0x45, 0xc0, - 0xdb, 0xa9, 0x8b, 0x80, 0xed, 0x0c, 0xef, 0x3b, 0xb8, 0x06, 0xf8, 0x9b, 0x06, 0xf6, 0xf3, 0xbb, - 0x26, 0x7c, 0x90, 0x88, 0xce, 0x48, 0x45, 0x77, 0x3b, 0xc5, 0x52, 0xb1, 0xfd, 0x0e, 0x94, 0x55, - 0x6f, 0x4d, 0xde, 0xb2, 0x24, 0x62, 0x8c, 0x32, 0x18, 0x1d, 0x8b, 0x94, 0x44, 0xdc, 0x57, 0xe4, - 0x0f, 0x4d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, 0x7f, 0xd7, 0xc0, 0xdb, 0xd7, 0x76, 0x45, 0x68, 0x25, - 0x42, 0x37, 0x53, 0xa1, 0x57, 0xe6, 0x0b, 0x7c, 0x37, 0x97, 0x2d, 0xd6, 0xfb, 0x57, 0x2f, 0x2b, - 0x4b, 0x5f, 0xbd, 0xac, 0x2c, 0x7d, 0xfd, 0xb2, 0xb2, 0xf4, 0xc7, 0x51, 0x45, 0xbb, 0x1a, 0x55, - 0xb4, 0xaf, 0x46, 0x15, 0xed, 0xeb, 0x51, 0x45, 0xfb, 0xcf, 0xa8, 0xa2, 0xfd, 0xe5, 0xbf, 0x95, - 0xa5, 0xdf, 0x14, 0x95, 0xdc, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xba, 0x23, 0xa4, 0x51, - 0x15, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index aa37d948..9679dafc 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -30,6 +30,12 @@ import 
"k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +message AllowedCSIDriver { + // Name is the registered name of the CSI driver + optional string name = 1; +} + // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. message AllowedFlexVolume { // driver is the name of the Flexvolume driver. @@ -42,7 +48,7 @@ message AllowedHostPath { // pathPrefix is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -58,9 +64,11 @@ message AllowedHostPath { // created by POSTing to .../pods//evictions. message Eviction { // ObjectMeta describes the pod that is being evicted. + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } @@ -97,17 +105,21 @@ message IDRange { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. + // +optional optional PodDisruptionBudgetSpec spec = 2; // Most recently observed status of the PodDisruptionBudget. + // +optional optional PodDisruptionBudgetStatus status = 3; } // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. message PodDisruptionBudgetList { + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated PodDisruptionBudget items = 2; @@ -119,16 +131,19 @@ message PodDisruptionBudgetSpec { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". + // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". + // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; } @@ -136,7 +151,7 @@ message PodDisruptionBudgetSpec { // PodDisruptionBudget. Status may trail the actual state of a system. message PodDisruptionBudgetStatus { // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. + // status information is valid only if observedGeneration equals to PDB's object generation. // +optional optional int64 observedGeneration = 1; @@ -171,7 +186,7 @@ message PodDisruptionBudgetStatus { // that will be applied to a pod and container. message PodSecurityPolicy { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -183,7 +198,7 @@ message PodSecurityPolicy { // PodSecurityPolicyList is a list of PodSecurityPolicy objects. message PodSecurityPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -242,6 +257,12 @@ message PodSecurityPolicySpec { // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. optional RunAsUserStrategyOptions runAsUser = 11; + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + optional RunAsGroupStrategyOptions runAsGroup = 22; + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. optional SupplementalGroupsStrategyOptions supplementalGroups = 12; @@ -277,11 +298,17 @@ message PodSecurityPolicySpec { // +optional repeated AllowedFlexVolume allowedFlexVolumes = 18; + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. + // +optional + repeated AllowedCSIDriver allowedCSIDrivers = 23; + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // + // // Examples: // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. @@ -291,7 +318,7 @@ message PodSecurityPolicySpec { // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // + // // Examples: // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. @@ -303,6 +330,23 @@ message PodSecurityPolicySpec { // This requires the ProcMountType feature flag to be enabled. // +optional repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; +} + +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. 
+message RunAsGroupStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; } // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. @@ -316,6 +360,21 @@ message RunAsUserStrategyOptions { repeated IDRange ranges = 2; } +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + optional string defaultRuntimeClassName = 2; +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. message SELinuxStrategyOptions { // rule is the strategy that will dictate the allowable labels that may be set. diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index c1a27275..d8e417ab 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -28,16 +28,19 @@ type PodDisruptionBudgetSpec struct { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". + // +optional MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` // Label query over pods whose evictions are managed by the disruption // budget. + // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". + // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` } @@ -45,7 +48,7 @@ type PodDisruptionBudgetSpec struct { // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. + // status information is valid only if observedGeneration equals to PDB's object generation. 
// +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` @@ -81,12 +84,15 @@ type PodDisruptionBudgetStatus struct { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the PodDisruptionBudget. + // +optional Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the PodDisruptionBudget. + // +optional Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -95,6 +101,7 @@ type PodDisruptionBudget struct { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { metav1.TypeMeta `json:",inline"` + // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -110,9 +117,11 @@ type Eviction struct { metav1.TypeMeta `json:",inline"` // ObjectMeta describes the pod that is being evicted. + // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // DeleteOptions may be provided + // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } @@ -125,7 +134,7 @@ type Eviction struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -174,6 +183,11 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -202,6 +216,11 @@ type PodSecurityPolicySpec struct { // is allowed in the "volumes" field. // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. 
+ // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. + // +optional + AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. @@ -226,6 +245,11 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -245,10 +269,14 @@ type AllowedHostPath struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` } +// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities +// field and means that any capabilities are allowed to be requested. +var AllowAllCapabilities v1.Capability = "*" + // FSType gives strong typing to different file systems that are used by volumes. type FSType string -var ( +const ( AzureFile FSType = "azureFile" Flocker FSType = "flocker" FlexVolume FSType = "flexVolume" @@ -268,8 +296,15 @@ var ( DownwardAPI FSType = "downwardAPI" FC FSType = "fc" ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" + PhotonPersistentDisk FSType = "photonPersistentDisk" + StorageOS FSType = "storageos" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" + CSI FSType = "csi" All FSType = "*" ) @@ -279,6 +314,12 @@ type AllowedFlexVolume struct { Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +type AllowedCSIDriver struct { + // Name is the registered name of the CSI driver + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { @@ -319,6 +360,16 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsGroupStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. 
+ // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + // IDRange provides a min/max of an allowed range of IDs. type IDRange struct { // min is the start of the range, inclusive. @@ -340,6 +391,20 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// Security Context. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when RunAsGroup are specified, they have to fall in the defined range. + RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsUserStrategyRunAsAny means that container may make requests for any gid. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. type FSGroupStrategyOptions struct { // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. @@ -356,6 +421,9 @@ type FSGroupStrategyOptions struct { type FSGroupStrategyType string const ( + // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. + // However, when FSGroups are specified, they have to fall in the defined range. + FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. @@ -378,19 +446,41 @@ type SupplementalGroupsStrategyOptions struct { type SupplementalGroupsStrategyType string const ( + // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when gids are specified, they have to fall in the defined range. + SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +type RuntimeClassStrategyOptions struct { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. 
+ // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. +const AllowAllRuntimeClassNames = "*" + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index df10b2a2..40a951c4 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -27,6 +27,15 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllowedCSIDriver = map[string]string{ + "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", + "name": "Name is the registered name of the CSI driver", +} + +func (AllowedCSIDriver) SwaggerDoc() map[string]string { + return map_AllowedCSIDriver +} + var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", "driver": "driver is the name of the Flexvolume driver.", @@ -117,7 +126,7 @@ func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { var map_PodDisruptionBudgetStatus = map[string]string{ "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", - "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.", + "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.", "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", "currentHealthy": "current number of healthy pods", @@ -131,7 +140,7 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -141,7 +150,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of schema objects.", } @@ -162,6 +171,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -169,15 +179,27 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. 
This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { return map_PodSecurityPolicySpec } +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", @@ -188,6 +210,16 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. 
A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 9af268a4..75851e12 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. +func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { + if in == nil { + return nil + } + out := new(AllowedCSIDriver) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in @@ -175,7 +191,7 @@ func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object { func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodDisruptionBudget, len(*in)) @@ -289,7 +305,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -348,6 +364,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -370,6 +391,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } + if in.AllowedCSIDrivers != nil { + in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers + *out = make([]AllowedCSIDriver, len(*in)) + copy(*out, *in) + } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) @@ -385,6 +411,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } return } @@ -398,6 +429,27 @@ func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { return out } +// DeepCopyInto is an 
autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. +func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in @@ -419,6 +471,32 @@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 28ceb269..80f43ce9 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 21010fbe..9bb48fc3 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -14,42 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -62,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() 
int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = 
RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_RoleList proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_RoleRef proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") @@ -124,10 +394,70 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptor_979ffd7b30c07419) +} + +var fileDescriptor_979ffd7b30c07419 = []byte{ + // 807 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, + 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, + 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, + 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, + 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, + 0xf3, 0xde, 0xf7, 0xbe, 
0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, + 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, + 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, + 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, + 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, + 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, + 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, + 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, + 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, + 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, + 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, + 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, + 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, + 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, + 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, + 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, + 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, + 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, + 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, + 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, + 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, + 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, + 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, + 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, + 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, + 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, + 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, + 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, + 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, + 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, + 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, + 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, + 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, + 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, + 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, + 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, + 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 
0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, + 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, + 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, + 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, + 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, + 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, + 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, + 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, + 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -135,29 +465,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -165,47 +502,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -213,45 +561,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,37 +618,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -297,37 +665,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -335,92 +712,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - 
i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -428,37 +780,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -466,45 +827,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -512,37 +884,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -550,37 +931,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err 
} @@ -588,29 +978,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -618,57 +1016,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -681,6 +1075,9 @@ func (m *AggregationRule) Size() (n int) { } 
func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -699,6 +1096,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -715,6 +1115,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -729,6 +1132,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -743,6 +1149,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -779,6 +1188,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -793,6 +1205,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -809,6 +1224,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -823,6 +1241,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -837,6 +1258,9 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -849,6 +1273,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -863,14 +1290,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -879,8 +1299,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -889,10 +1314,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), 
"AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -901,9 +1331,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -913,9 +1348,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -924,9 +1364,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -949,9 +1394,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -960,9 +1410,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -972,9 +1427,14 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -983,9 +1443,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1038,7 +1503,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1066,7 +1531,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-1075,10 +1540,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1092,6 +1560,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1119,7 +1590,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1147,7 +1618,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1156,6 +1627,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1177,7 +1651,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1186,6 +1660,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1208,7 +1685,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1217,6 +1694,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1236,6 +1716,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1263,7 +1746,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1291,7 +1774,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1300,6 +1783,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1321,7 +1807,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1330,6 +1816,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1352,7 +1841,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1361,6 +1850,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1377,6 +1869,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1404,7 +1899,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1432,7 +1927,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1441,6 +1936,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1462,7 +1960,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1471,6 +1969,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1488,6 +1989,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1515,7 +2019,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1543,7 +2047,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1552,6 +2056,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1573,7 +2080,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1582,6 +2089,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1599,6 +2109,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { 
return io.ErrUnexpectedEOF } @@ -1626,7 +2139,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1654,7 +2167,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1664,6 +2177,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1683,7 +2199,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1693,6 +2209,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1712,7 +2231,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1722,6 +2241,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1741,7 +2263,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1751,6 +2273,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1770,7 +2295,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1780,6 +2305,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1794,6 +2322,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1821,7 +2352,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1849,7 +2380,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1858,6 +2389,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1879,7 +2413,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } 
@@ -1888,6 +2422,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1905,6 +2442,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1932,7 +2472,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1960,7 +2500,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1969,6 +2509,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1990,7 +2533,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1999,6 +2542,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2021,7 +2567,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2030,6 +2576,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2046,6 +2595,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2073,7 +2625,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2101,7 +2653,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2110,6 +2662,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2131,7 +2686,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2140,6 +2695,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2157,6 +2715,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + 
skippy) > l { return io.ErrUnexpectedEOF } @@ -2184,7 +2745,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2212,7 +2773,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2221,6 +2782,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2242,7 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2251,6 +2815,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2268,6 +2835,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2295,7 +2865,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2323,7 +2893,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2333,6 +2903,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2352,7 +2925,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2362,6 +2935,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2381,7 +2957,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2391,6 +2967,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2405,6 +2984,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2432,7 +3014,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2460,7 +3042,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2470,6 +3052,9 @@ 
func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2489,7 +3074,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2499,6 +3084,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2518,7 +3106,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2528,6 +3116,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2547,7 +3138,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2557,6 +3148,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2571,6 +3165,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2637,10 +3234,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2669,6 +3269,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2687,62 +3290,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 807 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, - 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, - 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, - 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, - 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, - 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, - 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, - 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 
0x0e, 0xb4, 0x80, - 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, - 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, - 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, - 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, - 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, - 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, - 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, - 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, - 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, - 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, - 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, - 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, - 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, - 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, - 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, - 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, - 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, - 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, - 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, - 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, - 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, - 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, - 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, - 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, - 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, - 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, - 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, - 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, - 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, - 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, - 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, - 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, - 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, - 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, - 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, - 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, - 0x00, 
0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, - 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, - 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, - 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, - 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, - 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 4b321a7c..71fa0834 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -43,6 +43,7 @@ message ClusterRole { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole + // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -121,6 +122,7 @@ message Role { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role + // +optional repeated PolicyRule rules = 2; } diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 17163cbb..7ba7d054 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -108,6 +108,7 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -170,6 +171,7 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 0ec20c88..83ce310e 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 07eb321e..095a5e9c 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 5236a477..918b8a33 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 71eced8d..19336247 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -14,42 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto -// DO NOT EDIT! -/* - Package v1alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -62,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m 
= ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func 
(*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) 
XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") @@ -124,10 +394,71 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_b59b0bd5e7cb9590) +} + +var fileDescriptor_b59b0bd5e7cb9590 = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, + 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, + 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, + 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, + 0xbd, 0xf7, 
0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, + 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, + 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, + 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, + 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, + 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, + 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, + 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, + 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, + 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, + 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, + 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, + 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, + 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, + 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, + 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, + 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, + 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, + 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, + 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, + 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, + 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, + 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, + 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, + 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, + 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, + 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, + 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, + 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, + 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, + 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, + 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, + 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, + 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, + 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, + 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 
0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, + 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, + 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, + 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, + 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, + 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, + 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, + 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, + 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, + 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -135,29 +466,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -165,47 +503,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -213,45 +562,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,37 +619,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -297,37 +666,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -335,92 +713,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x2a } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range 
m.ResourceNames { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -428,37 +781,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -466,45 +828,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err 
:= m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -512,37 +885,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -550,37 +932,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - 
n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -588,29 +979,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -618,57 +1017,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var 
l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -681,6 +1076,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -699,6 +1097,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -715,6 +1116,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -729,6 +1133,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -743,6 +1150,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -779,6 +1189,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -793,6 +1206,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -809,6 +1225,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -823,6 +1242,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -837,6 +1259,9 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -849,6 +1274,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -863,14 +1291,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -879,8 +1300,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -889,10 +1315,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", 
"PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -901,9 +1332,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -913,9 +1349,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -924,9 +1365,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -949,9 +1395,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + 
"," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -960,9 +1411,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -972,9 +1428,14 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -983,9 +1444,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1038,7 +1504,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1066,7 +1532,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1075,10 +1541,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1092,6 +1561,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1119,7 +1591,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1147,7 +1619,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1156,6 +1628,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1177,7 +1652,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1186,6 +1661,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1208,7 +1686,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1217,6 +1695,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1236,6 +1717,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1263,7 +1747,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1291,7 +1775,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1300,6 +1784,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1321,7 +1808,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1330,6 +1817,9 @@ 
func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1352,7 +1842,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1361,6 +1851,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1377,6 +1870,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1404,7 +1900,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1432,7 +1928,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1441,6 +1937,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1462,7 +1961,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1471,6 +1970,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1488,6 +1990,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1515,7 +2020,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1543,7 +2048,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1552,6 +2057,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1573,7 +2081,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1582,6 +2090,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1599,6 +2110,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated 
} + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1626,7 +2140,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1654,7 +2168,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1664,6 +2178,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1683,7 +2200,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1693,6 +2210,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1712,7 +2232,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1722,6 +2242,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1741,7 +2264,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1751,6 +2274,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1770,7 +2296,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1780,6 +2306,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1794,6 +2323,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1821,7 +2353,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1849,7 +2381,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1858,6 +2390,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1879,7 +2414,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1888,6 +2423,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1905,6 +2443,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1932,7 +2473,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1960,7 +2501,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1969,6 +2510,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1990,7 +2534,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1999,6 +2543,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2021,7 +2568,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2030,6 +2577,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2046,6 +2596,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2073,7 +2626,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2101,7 +2654,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2110,6 +2663,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2131,7 +2687,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2140,6 +2696,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2157,6 +2716,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2184,7 +2746,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2212,7 +2774,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2221,6 +2783,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2242,7 +2807,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2251,6 +2816,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2268,6 +2836,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2295,7 +2866,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2323,7 +2894,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2333,6 +2904,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2352,7 +2926,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2362,6 +2936,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2381,7 +2958,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2391,6 +2968,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2405,6 +2985,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2432,7 +3015,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2460,7 +3043,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
(uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2470,6 +3053,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2489,7 +3075,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2499,6 +3085,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2518,7 +3107,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2528,6 +3117,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2547,7 +3139,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2557,6 +3149,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2571,6 +3166,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2637,10 +3235,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2669,6 +3270,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2687,63 +3291,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 830 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, - 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, - 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, - 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, - 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, - 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, - 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 
0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, - 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, - 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, - 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, - 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, - 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, - 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, - 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, - 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, - 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, - 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, - 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, - 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, - 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, - 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, - 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, - 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, - 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, - 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, - 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, - 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, - 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, - 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, - 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, - 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, - 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, - 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, - 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, - 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, - 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, - 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, - 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, - 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, - 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, - 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, - 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, - 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 
0xab, 0x2b, 0xe4, - 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, - 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, - 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, - 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, - 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, - 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, - 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, - 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index cde3aaac..5c50a87e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -37,12 +37,14 @@ message AggregationRule { } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. message ClusterRole { // Standard object's metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole + // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -54,6 +56,7 @@ message ClusterRole { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. message ClusterRoleBinding { // Standard object's metadata. // +optional @@ -68,7 +71,8 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } -// ClusterRoleBindingList is a collection of ClusterRoleBindings +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20. message ClusterRoleBindingList { // Standard object's metadata. // +optional @@ -78,7 +82,8 @@ message ClusterRoleBindingList { repeated ClusterRoleBinding items = 2; } -// ClusterRoleList is a collection of ClusterRoles +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. message ClusterRoleList { // Standard object's metadata. // +optional @@ -116,18 +121,21 @@ message PolicyRule { } // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. message Role { // Standard object's metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role + // +optional repeated PolicyRule rules = 2; } // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. 
// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. message RoleBinding { // Standard object's metadata. // +optional @@ -143,6 +151,7 @@ message RoleBinding { } // RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. message RoleBindingList { // Standard object's metadata. // +optional @@ -152,7 +161,8 @@ message RoleBindingList { repeated RoleBinding items = 2; } -// RoleList is a collection of Roles +// RoleList is a collection of Roles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. message RoleList { // Standard object's metadata. // +optional diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 398d6a16..a5d3e38f 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -103,6 +103,7 @@ type RoleRef struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. type Role struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -110,6 +111,7 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -119,6 +121,7 @@ type Role struct { // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. type RoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -137,6 +140,7 @@ type RoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. type RoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -149,7 +153,8 @@ type RoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// RoleList is a collection of Roles +// RoleList is a collection of Roles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. type RoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -165,6 +170,7 @@ type RoleList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. 
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. type ClusterRole struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -172,6 +178,7 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -195,6 +202,7 @@ type AggregationRule struct { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. type ClusterRoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -212,7 +220,8 @@ type ClusterRoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleBindingList is a collection of ClusterRoleBindings +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20. type ClusterRoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -225,7 +234,8 @@ type ClusterRoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleList is a collection of ClusterRoles +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. type ClusterRoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 1d6ef30b..8238de21 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } @@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string { } var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this ClusterRole", "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", @@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string { } var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string { } var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoleBindings", } @@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string { } var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles", + "": "ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoles", } @@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string { } var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this Role", } @@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string { } var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string { } var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings", + "": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of RoleBindings", } @@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string { } var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles", + "": "RoleList is a collection of Roles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of Roles", } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index 97f63331..0358227f 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index 4b77c9c6..fe7aae97 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1beta1 // import "k8s.io/api/rbac/v1beta1" diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 71e5799e..6c80f52f 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -14,42 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -62,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{6} +} +func (m *Role) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int 
{ + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_RoleList proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_RoleRef proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") @@ -124,10 +394,70 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_99f6bec96facc83d) +} + +var fileDescriptor_99f6bec96facc83d = []byte{ + // 808 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, + 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, + 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, + 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, + 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 
0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, + 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, + 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, + 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, + 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, + 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, + 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, + 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, + 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, + 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, + 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, + 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, + 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, + 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, + 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, + 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, + 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, + 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, + 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, + 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, + 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, + 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, + 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, + 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, + 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, + 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, + 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, + 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, + 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, + 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, + 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, + 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, + 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, + 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, + 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, + 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, + 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 
0x9f, 0xdf, 0x1f, 0xbf, + 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, + 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, + 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, + 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, + 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, + 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, + 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, + 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, + 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -135,29 +465,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -165,47 +502,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -213,45 +561,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,37 +618,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -297,37 +665,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -335,92 +712,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - 
for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -428,37 +780,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -466,45 +827,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- 
{ + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -512,37 +884,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -550,37 +931,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -588,29 +978,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -618,57 +1016,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + 
return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -681,6 +1075,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -699,6 +1096,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -715,6 +1115,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -729,6 +1132,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -743,6 +1149,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -779,6 +1188,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -793,6 +1205,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -809,6 +1224,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -823,6 +1241,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -837,6 +1258,9 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -849,6 +1273,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -863,14 +1290,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -879,8 +1299,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -889,10 +1314,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -901,9 +1331,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -913,9 +1348,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -924,9 +1364,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -949,9 +1394,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", 
"PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -960,9 +1410,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -972,9 +1427,14 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -983,9 +1443,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1038,7 +1503,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1066,7 +1531,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1075,10 +1540,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1092,6 +1560,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1119,7 +1590,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1147,7 +1618,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1156,6 +1627,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1177,7 +1651,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1186,6 +1660,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1208,7 +1685,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1217,6 +1694,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1236,6 +1716,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1263,7 +1746,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1291,7 +1774,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1300,6 +1783,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1321,7 +1807,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } @@ -1330,6 +1816,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1352,7 +1841,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1361,6 +1850,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1377,6 +1869,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1404,7 +1899,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1432,7 +1927,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1441,6 +1936,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1462,7 +1960,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1471,6 +1969,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1488,6 +1989,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1515,7 +2019,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1543,7 +2047,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1552,6 +2056,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1573,7 +2080,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1582,6 +2089,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1599,6 +2109,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1626,7 +2139,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1654,7 +2167,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1664,6 +2177,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1683,7 +2199,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1693,6 +2209,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1712,7 +2231,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1722,6 +2241,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1741,7 +2263,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1751,6 +2273,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1770,7 +2295,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1780,6 +2305,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1794,6 +2322,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1821,7 +2352,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1849,7 +2380,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1858,6 +2389,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1879,7 +2413,7 @@ func (m *Role) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1888,6 +2422,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1905,6 +2442,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1932,7 +2472,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1960,7 +2500,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1969,6 +2509,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1990,7 +2533,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1999,6 +2542,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2021,7 +2567,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2030,6 +2576,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2046,6 +2595,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2073,7 +2625,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2101,7 +2653,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2110,6 +2662,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2131,7 +2686,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2140,6 +2695,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2157,6 +2715,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if 
skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2184,7 +2745,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2212,7 +2773,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2221,6 +2782,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2242,7 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2251,6 +2815,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2268,6 +2835,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2295,7 +2865,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2323,7 +2893,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2333,6 +2903,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2352,7 +2925,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2362,6 +2935,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2381,7 +2957,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2391,6 +2967,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2405,6 +2984,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2432,7 +3014,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2460,7 +3042,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2470,6 +3052,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2489,7 +3074,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2499,6 +3084,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2518,7 +3106,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2528,6 +3116,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2547,7 +3138,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2557,6 +3148,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2571,6 +3165,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2637,10 +3234,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2669,6 +3269,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2687,62 +3290,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 808 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, - 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, - 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, - 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, - 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, - 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, - 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, - 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 
0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, - 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, - 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, - 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, - 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, - 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, - 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, - 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, - 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, - 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, - 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, - 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, - 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, - 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, - 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, - 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, - 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, - 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, - 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, - 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, - 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, - 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, - 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, - 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, - 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, - 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, - 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, - 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, - 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, - 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, - 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, - 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, - 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, - 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, - 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, - 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, - 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 
0x29, 0x47, 0xd6, 0x2e, 0x98, - 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, - 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, - 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, - 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, - 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, - 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 27bd30ce..87e0dbdf 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -37,12 +37,14 @@ message AggregationRule { } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. message ClusterRole { // Standard object's metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole + // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -54,6 +56,7 @@ message ClusterRole { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. message ClusterRoleBinding { // Standard object's metadata. // +optional @@ -68,7 +71,8 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } -// ClusterRoleBindingList is a collection of ClusterRoleBindings +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20. message ClusterRoleBindingList { // Standard object's metadata. // +optional @@ -78,7 +82,8 @@ message ClusterRoleBindingList { repeated ClusterRoleBinding items = 2; } -// ClusterRoleList is a collection of ClusterRoles +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. message ClusterRoleList { // Standard object's metadata. // +optional @@ -116,18 +121,21 @@ message PolicyRule { } // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. message Role { // Standard object's metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role + // +optional repeated PolicyRule rules = 2; } // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. 
RoleBindings in a given // namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. message RoleBinding { // Standard object's metadata. // +optional @@ -143,6 +151,7 @@ message RoleBinding { } // RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. message RoleBindingList { // Standard object's metadata. // +optional @@ -153,6 +162,7 @@ message RoleBindingList { } // RoleList is a collection of Roles +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. message RoleList { // Standard object's metadata. // +optional diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index 857b67a6..74c70936 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -102,6 +102,7 @@ type RoleRef struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. type Role struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -109,6 +110,7 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -118,6 +120,7 @@ type Role struct { // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. type RoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -136,6 +139,7 @@ type RoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. type RoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -149,6 +153,7 @@ type RoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleList is a collection of Roles +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. type RoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -164,6 +169,7 @@ type RoleList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. type ClusterRole struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
@@ -171,6 +177,7 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be @@ -193,6 +200,7 @@ type AggregationRule struct { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. type ClusterRoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -210,7 +218,8 @@ type ClusterRoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleBindingList is a collection of ClusterRoleBindings +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20. type ClusterRoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -223,7 +232,8 @@ type ClusterRoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleList is a collection of ClusterRoles +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. type ClusterRoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 66dba6ca..8e9d7ace 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } @@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string { } var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this ClusterRole", "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", @@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string { } var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string { } var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoleBindings", } @@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string { } var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles", + "": "ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoles", } @@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string { } var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this Role", } @@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string { } var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string { } var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings", + "": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of RoleBindings", } @@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string { } var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles", + "": "RoleList is a collection of Roles Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.", "metadata": "Standard object's metadata.", "items": "Items is a list of Roles", } diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index c085c90b..7ffe5810 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go new file mode 100644 index 00000000..76c4da00 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=scheduling.k8s.io + +package v1 // import "k8s.io/api/scheduling/v1" diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go new file mode 100644 index 00000000..7e9764f1 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -0,0 +1,761 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo + +func init() { + 
proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass") + proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_277b2f43b72fffd5) +} + +var fileDescriptor_277b2f43b72fffd5 = []byte{ + // 488 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, + 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 0x0c, 0x88, + 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, + 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 0x04, + 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, + 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, + 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, + 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, + 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, + 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, + 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, + 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, + 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, + 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, + 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, + 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, + 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, + 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, + 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, + 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, + 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, + 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, + 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, + 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, + 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, + 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, + 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, + 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, + 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, + 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, +} + +func (m *PriorityClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a + } + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- + if m.GlobalDefault { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 2 + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PriorityClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `GlobalDefault:` + 
fmt.Sprintf("%v", this.GlobalDefault) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PriorityClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.GlobalDefault = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto new file mode 100644 index 00000000..82f6e0a2 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.api.scheduling.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +message PriorityClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + optional int32 value = 2; + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + optional bool globalDefault = 3; + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; +} + +// PriorityClassList is a collection of priority classes. +message PriorityClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of PriorityClasses + repeated PriorityClass items = 2; +} + diff --git a/vendor/k8s.io/api/scheduling/v1/register.go b/vendor/k8s.io/api/scheduling/v1/register.go new file mode 100644 index 00000000..33977fe9 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "scheduling.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + + // SchemeBuilder is a collection of functions that add things to a scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PriorityClass{}, + &PriorityClassList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go new file mode 100644 index 00000000..087ee10d --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +type PriorityClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. 
+ // +optional + GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClassList is a collection of priority classes. +type PriorityClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PriorityClasses + Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go new file mode 100644 index 00000000..4cfb9d3e --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PriorityClass = map[string]string{ + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", +} + +func (PriorityClass) SwaggerDoc() map[string]string { + return map_PriorityClass +} + +var map_PriorityClassList = map[string]string{ + "": "PriorityClassList is a collection of priority classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of PriorityClasses", +} + +func (PriorityClassList) SwaggerDoc() map[string]string { + return map_PriorityClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..63bfe640 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go @@ -0,0 +1,90 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(corev1.PreemptionPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. +func (in *PriorityClass) DeepCopy() *PriorityClass { + if in == nil { + return nil + } + out := new(PriorityClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. +func (in *PriorityClassList) DeepCopy() *PriorityClassList { + if in == nil { + return nil + } + out := new(PriorityClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index e10d07ff..cff47e1f 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io + package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1" diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 97c07c98..05bff0ff 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -14,30 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto -// DO NOT EDIT! -/* - Package v1alpha1 is a generated protocol buffer package. +package v1alpha1 - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +import ( + fmt "fmt" - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1alpha1 + io "io" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -50,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_f033641dd0b95dce) +} + +var fileDescriptor_f033641dd0b95dce = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, + 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, + 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, + 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, + 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 0x42, 0xb2, 0x45, + 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, + 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, + 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, + 0xc7, 0x77, 0xcf, 0x3c, 0x61, 
0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, + 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, + 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, + 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, + 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, + 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, + 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, + 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, + 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, + 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, + 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, + 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, + 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 0xcb, + 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, + 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, + 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, + 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, + 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, + 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, + 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, + 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, + 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -73,40 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x18 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -114,61 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -177,10 +271,17 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -195,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -212,10 +306,11 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), 
"ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -224,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -254,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -282,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -291,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -312,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -331,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -351,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -361,11 +464,47 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -375,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -402,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -430,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -439,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -460,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -469,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -486,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -552,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -584,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -602,39 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 447 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x33, 0x5d, 0x0b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0xe8, 0x61, 0x36, 0xac, 0x97, 0x5c, - 0x76, 0xc6, 0x2e, 0x2a, 0x82, 0xb7, 0xb8, 0xb0, 0x08, 0x8a, 0x92, 0x83, 0x07, 0xf1, 0xe0, 0x24, - 0x79, 0x37, 0x1d, 0x9b, 0x64, 0xc2, 0xcc, 0x24, 0xb0, 0x37, 0xcf, 0x9e, 0xfc, 0x52, 0x42, 0x8f, - 0x7b, 0xdc, 0xd3, 0x62, 0xe3, 0x17, 0x91, 0xa4, 0x69, 0xd3, 0x5a, 0xfc, 0x73, 0xcb, 0x3c, 0xef, - 0xef, 0x79, 0xe6, 0xcd, 0xc3, 0xe0, 0x8b, 0xc5, 0x73, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, - 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, - 0xa6, 0x22, 0x4f, 0x58, 0x35, 0xe3, 0x69, 0x31, 0xe7, 0x33, 0x96, 0x40, 0x0e, 0x8a, 0x1b, 0x88, - 0x69, 0xa1, 0xa4, 0x91, 0x36, 0x59, 0xf3, 0x94, 0x17, 0x82, 0xf6, 0x3c, 0xdd, 0xf0, 0xd3, 
0xd3, - 0x44, 0x98, 0x79, 0x19, 0xd2, 0x48, 0x66, 0x2c, 0x91, 0x89, 0x64, 0xad, 0x2d, 0x2c, 0x2f, 0xdb, - 0x53, 0x7b, 0x68, 0xbf, 0xd6, 0x71, 0xd3, 0x27, 0xfd, 0xf5, 0x19, 0x8f, 0xe6, 0x22, 0x07, 0x75, - 0xc5, 0x8a, 0x45, 0xd2, 0x08, 0x9a, 0x65, 0x60, 0x38, 0xab, 0x0e, 0x96, 0x98, 0xb2, 0x3f, 0xb9, - 0x54, 0x99, 0x1b, 0x91, 0xc1, 0x81, 0xe1, 0xd9, 0xbf, 0x0c, 0xcd, 0xaf, 0x64, 0xfc, 0x77, 0xdf, - 0xc9, 0xd7, 0x01, 0x9e, 0xbc, 0x53, 0x42, 0x2a, 0x61, 0xae, 0x5e, 0xa6, 0x5c, 0x6b, 0xfb, 0x13, - 0x1e, 0x35, 0x5b, 0xc5, 0xdc, 0x70, 0x07, 0xb9, 0xc8, 0x1b, 0x9f, 0x3d, 0xa6, 0x7d, 0x25, 0xdb, - 0x70, 0x5a, 0x2c, 0x92, 0x46, 0xd0, 0xb4, 0xa1, 0x69, 0x35, 0xa3, 0x6f, 0xc3, 0xcf, 0x10, 0x99, - 0x37, 0x60, 0xb8, 0x6f, 0x2f, 0x6f, 0x8f, 0xad, 0xfa, 0xf6, 0x18, 0xf7, 0x5a, 0xb0, 0x4d, 0xb5, - 0x1f, 0xe1, 0x61, 0xc5, 0xd3, 0x12, 0x9c, 0x81, 0x8b, 0xbc, 0xa1, 0x3f, 0xe9, 0xe0, 0xe1, 0xfb, - 0x46, 0x0c, 0xd6, 0x33, 0xfb, 0x05, 0x9e, 0x24, 0xa9, 0x0c, 0x79, 0x7a, 0x0e, 0x97, 0xbc, 0x4c, - 0x8d, 0x73, 0xe4, 0x22, 0x6f, 0xe4, 0x3f, 0xec, 0xe0, 0xc9, 0xc5, 0xee, 0x30, 0xd8, 0x67, 0xed, - 0xa7, 0x78, 0x1c, 0x83, 0x8e, 0x94, 0x28, 0x8c, 0x90, 0xb9, 0x73, 0xc7, 0x45, 0xde, 0x5d, 0xff, - 0x41, 0x67, 0x1d, 0x9f, 0xf7, 0xa3, 0x60, 0x97, 0x3b, 0xf9, 0x8e, 0xf0, 0xfd, 0xbd, 0x32, 0x5e, - 0x0b, 0x6d, 0xec, 0x8f, 0x07, 0x85, 0xd0, 0xff, 0x2b, 0xa4, 0x71, 0xb7, 0x75, 0xdc, 0xeb, 0x6e, - 0x1e, 0x6d, 0x94, 0x9d, 0x32, 0x02, 0x3c, 0x14, 0x06, 0x32, 0xed, 0x0c, 0xdc, 0x23, 0x6f, 0x7c, - 0x76, 0x4a, 0xff, 0xfe, 0xfc, 0xe8, 0xde, 0x7e, 0x7d, 0x77, 0xaf, 0x9a, 0x8c, 0x60, 0x1d, 0xe5, - 0xd3, 0xe5, 0x8a, 0x58, 0xd7, 0x2b, 0x62, 0xdd, 0xac, 0x88, 0xf5, 0xa5, 0x26, 0x68, 0x59, 0x13, - 0x74, 0x5d, 0x13, 0x74, 0x53, 0x13, 0xf4, 0xa3, 0x26, 0xe8, 0xdb, 0x4f, 0x62, 0x7d, 0x18, 0x6d, - 0x32, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xab, 0x20, 0x12, 0x63, 0x3c, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 5fb54721..682fb873 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,11 +29,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -52,12 +54,19 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. 
+ // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 21e3df0a..86a2c513 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -24,12 +25,13 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -49,6 +51,13 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -57,7 +66,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index f406f440..63a9a353 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -28,11 +28,12 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -41,7 +42,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index fe0c8604..03928239 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. 
package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,6 +30,11 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(v1.PreemptionPolicy) + **out = **in + } return } @@ -54,7 +60,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go index f2dd1cfa..e6619689 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io + package v1beta1 // import "k8s.io/api/scheduling/v1beta1" diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index ea8f8d5e..198fcd02 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -14,30 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. +package v1beta1 - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +import ( + fmt "fmt" - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1beta1 + io "io" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -50,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_6cd406dede2d3f42) +} + +var fileDescriptor_6cd406dede2d3f42 = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 0x7a, 0x4b, + 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 0x18, + 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, + 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, + 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 0x82, 0x2e, 0x12, + 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, + 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, + 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, + 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 
0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, + 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, + 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, + 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, + 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, 0x17, 0x92, + 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, + 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, + 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, + 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, + 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, + 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, + 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, + 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, + 0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, + 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, + 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, + 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, + 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, + 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, + 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, + 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, + 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -73,40 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x18 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -114,61 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -177,10 +271,17 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -195,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -212,10 +306,11 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), 
"ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -224,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -254,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -282,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -291,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -312,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -331,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -351,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -361,11 +464,47 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -375,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -402,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -430,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -439,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -460,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -469,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -486,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -552,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -584,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -602,39 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 448 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8b, 0xd3, 0x40, - 0x18, 0xc5, 0x33, 0x5d, 0x8b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0x28, 0x38, 0x1b, 0xd6, 0x4b, 0x0e, - 0xee, 0x8c, 0x5d, 0x54, 0x04, 0x6f, 0x71, 0x51, 0x04, 0x45, 0xcd, 0xc1, 0x83, 0x78, 0x70, 0x92, - 0x7c, 0x9b, 0x8e, 0x4d, 0x32, 0x61, 0x66, 0x12, 0xd8, 0x9b, 0x67, 0x4f, 0xfe, 0x51, 0x1e, 0x7a, - 0xdc, 0xe3, 0x9e, 0x16, 0x1b, 0xff, 0x11, 0x49, 0x1a, 0x37, 0xad, 0x45, 0xdd, 0x5b, 0xe6, 0x7d, - 0xbf, 0xf7, 0xe6, 0xcb, 0x63, 0xf0, 0xf3, 0xc5, 0x13, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, - 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, - 0xa6, 0x22, 0x4f, 0x58, 0x35, 0x0b, 0xc1, 0xf0, 0x19, 0x4b, 0x20, 0x07, 0xc5, 0x0d, 0xc4, 0xb4, - 0x50, 0xd2, 0x48, 0xfb, 0xee, 0x1a, 0xa7, 0xbc, 0x10, 0xb4, 0xc7, 0x69, 0x87, 0x4f, 0x0f, 
0x13, - 0x61, 0xe6, 0x65, 0x48, 0x23, 0x99, 0xb1, 0x44, 0x26, 0x92, 0xb5, 0xae, 0xb0, 0x3c, 0x69, 0x4f, - 0xed, 0xa1, 0xfd, 0x5a, 0xa7, 0x4d, 0x1f, 0xf6, 0x97, 0x67, 0x3c, 0x9a, 0x8b, 0x1c, 0xd4, 0x29, - 0x2b, 0x16, 0x49, 0x23, 0x68, 0x96, 0x81, 0xe1, 0xac, 0xda, 0xd9, 0x61, 0xca, 0xfe, 0xe6, 0x52, - 0x65, 0x6e, 0x44, 0x06, 0x3b, 0x86, 0xc7, 0xff, 0x33, 0x34, 0x7f, 0x92, 0xf1, 0x3f, 0x7d, 0x07, - 0x5f, 0x07, 0x78, 0xf2, 0x56, 0x09, 0xa9, 0x84, 0x39, 0x7d, 0x96, 0x72, 0xad, 0xed, 0x4f, 0x78, - 0xd4, 0x6c, 0x15, 0x73, 0xc3, 0x1d, 0xe4, 0x22, 0x6f, 0x7c, 0xf4, 0x80, 0xf6, 0x8d, 0x5c, 0x86, - 0xd3, 0x62, 0x91, 0x34, 0x82, 0xa6, 0x0d, 0x4d, 0xab, 0x19, 0x7d, 0x13, 0x7e, 0x86, 0xc8, 0xbc, - 0x06, 0xc3, 0x7d, 0x7b, 0x79, 0xb1, 0x6f, 0xd5, 0x17, 0xfb, 0xb8, 0xd7, 0x82, 0xcb, 0x54, 0xfb, - 0x1e, 0x1e, 0x56, 0x3c, 0x2d, 0xc1, 0x19, 0xb8, 0xc8, 0x1b, 0xfa, 0x93, 0x0e, 0x1e, 0xbe, 0x6f, - 0xc4, 0x60, 0x3d, 0xb3, 0x9f, 0xe2, 0x49, 0x92, 0xca, 0x90, 0xa7, 0xc7, 0x70, 0xc2, 0xcb, 0xd4, - 0x38, 0x7b, 0x2e, 0xf2, 0x46, 0xfe, 0x9d, 0x0e, 0x9e, 0xbc, 0xd8, 0x1c, 0x06, 0xdb, 0xac, 0xfd, - 0x08, 0x8f, 0x63, 0xd0, 0x91, 0x12, 0x85, 0x11, 0x32, 0x77, 0xae, 0xb9, 0xc8, 0xbb, 0xe1, 0xdf, - 0xee, 0xac, 0xe3, 0xe3, 0x7e, 0x14, 0x6c, 0x72, 0x07, 0xdf, 0x11, 0xbe, 0xb5, 0x55, 0xc6, 0x2b, - 0xa1, 0x8d, 0xfd, 0x71, 0xa7, 0x10, 0x7a, 0xb5, 0x42, 0x1a, 0x77, 0x5b, 0xc7, 0xcd, 0xee, 0xe6, - 0xd1, 0x6f, 0x65, 0xa3, 0x8c, 0x77, 0x78, 0x28, 0x0c, 0x64, 0xda, 0x19, 0xb8, 0x7b, 0xde, 0xf8, - 0xe8, 0x3e, 0xfd, 0xe7, 0xeb, 0xa3, 0x5b, 0xeb, 0xf5, 0xd5, 0xbd, 0x6c, 0x22, 0x82, 0x75, 0x92, - 0x7f, 0xb8, 0x5c, 0x11, 0xeb, 0x6c, 0x45, 0xac, 0xf3, 0x15, 0xb1, 0xbe, 0xd4, 0x04, 0x2d, 0x6b, - 0x82, 0xce, 0x6a, 0x82, 0xce, 0x6b, 0x82, 0x7e, 0xd4, 0x04, 0x7d, 0xfb, 0x49, 0xac, 0x0f, 0xd7, - 0xbb, 0xc8, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x41, 0x74, 0x8a, 0x60, 0x38, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 0a957550..2582891b 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1beta1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,11 +29,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -52,12 +54,19 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. 
+ // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index a9aaa866..f806ecd4 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -24,12 +25,13 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -49,6 +51,13 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -57,7 +66,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index c18f54a8..ffded9df 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -28,11 +28,12 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -41,7 +42,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go index 6f68e4ac..6e200857 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. 
package v1beta1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,6 +30,11 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(v1.PreemptionPolicy) + **out = **in + } return } @@ -54,7 +60,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index 05a62c56..60066bb6 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -15,7 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io + package v1alpha1 // import "k8s.io/api/settings/v1alpha1" diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index 15285bae..7469f74a 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -14,33 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto -// DO NOT EDIT! -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto - - It has these top-level messages: - PodPreset - PodPresetList - PodPresetSpec -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -53,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{0} +} +func (m *PodPreset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPreset.Merge(m, src) +} +func (m *PodPreset) XXX_Size() int { + return m.Size() +} +func (m *PodPreset) XXX_DiscardUnknown() { + xxx_messageInfo_PodPreset.DiscardUnknown(m) +} -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PodPreset proto.InternalMessageInfo -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{1} +} +func (m *PodPresetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetList.Merge(m, src) +} +func (m *PodPresetList) XXX_Size() int { + return m.Size() +} +func (m *PodPresetList) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPresetList proto.InternalMessageInfo + +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{2} +} +func (m *PodPresetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetSpec.Merge(m, src) +} +func (m *PodPresetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodPresetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo func init() { proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) +} + +var fileDescriptor_48fab0a6ea4b79ce = []byte{ + // 
542 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, + 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, + 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, + 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, + 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, + 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, + 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, + 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, + 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, + 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, + 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, + 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, + 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, + 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, + 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, + 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, + 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, + 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, + 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, + 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, + 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, + 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, + 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, + 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, + 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, + 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, + 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, + 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, + 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, + 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, + 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, + 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, + 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, + 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, +} + func (m *PodPreset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err 
!= nil { return nil, err } @@ -81,33 +187,42 @@ func (m *PodPreset) Marshal() (dAtA []byte, err error) { } func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodPresetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -115,37 +230,46 @@ func (m *PodPresetList) Marshal() (dAtA []byte, err error) { } func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -153,97 +277,99 @@ func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n4, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } } - i 
+= n4 - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PodPreset) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -254,6 +380,9 @@ func (m *PodPreset) Size() (n int) { } func (m *PodPresetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -268,6 +397,9 @@ func (m *PodPresetList) Size() (n int) { } func (m *PodPresetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Selector.Size() @@ -300,14 +432,7 @@ func (m *PodPresetSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -317,7 +442,7 @@ func (this *PodPreset) String() 
string { return "nil" } s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -327,9 +452,14 @@ func (this *PodPresetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodPreset{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -338,12 +468,32 @@ func (this *PodPresetSpec) String() string { if this == nil { return "nil" } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeMounts += "}" s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_api_core_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_api_core_v1.Volume", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_api_core_v1.VolumeMount", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, `}`, }, "") return s @@ -371,7 +521,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -399,7 +549,7 @@ func (m *PodPreset) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -408,6 +558,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -429,7 +582,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -438,6 +591,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -454,6 +610,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -481,7 +640,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -509,7 +668,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -518,6 +677,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -539,7 +701,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -548,6 +710,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -565,6 +730,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -592,7 +760,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -620,7 +788,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -629,6 +797,9 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -650,7 +821,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -659,10 +830,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) + m.Env = append(m.Env, 
v11.EnvVar{}) if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -681,7 +855,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -690,10 +864,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, k8s_io_api_core_v1.EnvFromSource{}) + m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -712,7 +889,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -721,10 +898,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, k8s_io_api_core_v1.Volume{}) + m.Volumes = append(m.Volumes, v11.Volume{}) if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -743,7 +923,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -752,10 +932,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, k8s_io_api_core_v1.VolumeMount{}) + m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -769,6 +952,9 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -835,10 +1021,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -867,6 +1056,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -885,45 +1077,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 
0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto index d5534c4d..db1ec931 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.proto @@ -42,7 +42,7 @@ message PodPreset { // PodPresetList is a list of PodPreset objects. message PodPresetList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go index 506aacf4..8cc99d44 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/types.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types.go @@ -61,7 +61,7 @@ type PodPresetSpec struct { type PodPresetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go index 508c452f..0501e0af 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go @@ -37,7 +37,7 @@ func (PodPreset) SwaggerDoc() map[string]string { var map_PodPresetList = map[string]string{ "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index 6397a88a..ed6c31a3 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -56,7 +56,7 @@ func (in *PodPreset) DeepCopyObject() runtime.Object { func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodPreset, len(*in)) diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 8f4a4045..75a6489d 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -15,6 +15,8 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true -package v1 + +package v1 // import "k8s.io/api/storage/v1" diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index d43a9829..3d09ee7e 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -14,34 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" -import strings "strings" -import reflect "reflect" - -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -54,299 +46,2788 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{0} +} +func (m *CSINode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINode) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINode.Merge(m, src) +} +func (m *CSINode) XXX_Size() int { + return m.Size() +} +func (m *CSINode) XXX_DiscardUnknown() { + xxx_messageInfo_CSINode.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINode proto.InternalMessageInfo + +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{1} +} +func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeDriver.Merge(m, src) +} +func (m *CSINodeDriver) XXX_Size() int { + return m.Size() +} +func (m *CSINodeDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo + +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{2} +} +func (m *CSINodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeList.Merge(m, src) +} +func (m *CSINodeList) XXX_Size() int { + return m.Size() +} +func (m *CSINodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeList.DiscardUnknown(m) +} + +var 
xxx_messageInfo_CSINodeList proto.InternalMessageInfo + +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{3} +} +func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeSpec.Merge(m, src) +} +func (m *CSINodeSpec) XXX_Size() int { + return m.Size() +} +func (m *CSINodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{4} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClass proto.InternalMessageInfo + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{5} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{6} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) 
Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{7} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{8} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{9} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{10} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func 
(m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{11} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo + +func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } +func (*VolumeNodeResources) ProtoMessage() {} +func (*VolumeNodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{12} +} +func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeResources.Merge(m, src) +} +func (m *VolumeNodeResources) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) +} -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo func init() { + proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1.CSINode") + proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1.CSINodeDriver") + proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1.CSINodeList") + proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1.CSINodeSpec") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") + proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1.VolumeNodeResources") } -func (m *StorageClass) Marshal() (dAtA []byte, err error) { + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptor_3b530c1983504d8d) +} + +var fileDescriptor_3b530c1983504d8d = []byte{ + // 1212 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x41, 0x6f, 0xe3, 0x44, + 0x14, 0xae, 0x9b, 0xa4, 0x4d, 0x27, 0x2d, 0x9b, 0xce, 0x16, 0x08, 0x39, 0x24, 0x95, 0x41, 0x10, + 0x0a, 0xeb, 0x6c, 0x97, 0x65, 0xb5, 0x42, 0x02, 0x29, 0x6e, 0x23, 0x51, 0xd1, 0xb4, 0xd5, 0xb4, + 0xac, 0x10, 0x02, 0xc4, 0xd4, 0x1e, 0x52, 0x6f, 0x62, 0x8f, 0xf1, 0x4c, 0x02, 0xb9, 0x71, 0xe2, + 0x86, 0x04, 0x57, 0x7e, 0x05, 0x5c, 0x39, 0x72, 0x2a, 0xb7, 0x15, 0xa7, 0x3d, 0x45, 0xd4, 0x9c, + 0xe1, 0x07, 0xf4, 0x84, 0x66, 0x3c, 0x8d, 0x9d, 0xc4, 0x29, 0xe9, 0xa5, 0xb7, 0xcc, 0x9b, 0xf7, + 0x7d, 0xef, 0xbd, 0xf9, 0xde, 0xbc, 0x71, 0xc0, 0x07, 0x9d, 0xc7, 0xcc, 0x70, 0x68, 0xbd, 0xd3, + 0x3b, 0x25, 0x81, 0x47, 0x38, 0x61, 0xf5, 0x3e, 0xf1, 0x6c, 0x1a, 0xd4, 0xd5, 0x06, 0xf6, 0x9d, + 0x3a, 0xe3, 0x34, 0xc0, 0x6d, 0x52, 0xef, 0x6f, 0xd7, 0xdb, 0xc4, 0x23, 0x01, 0xe6, 0xc4, 0x36, + 0xfc, 0x80, 0x72, 0x0a, 0x5f, 0x8c, 0xdc, 0x0c, 0xec, 0x3b, 0x86, 0x72, 0x33, 0xfa, 0xdb, 0xe5, + 0x7b, 0x6d, 0x87, 0x9f, 0xf5, 0x4e, 0x0d, 0x8b, 0xba, 0xf5, 0x36, 0x6d, 0xd3, 0xba, 0xf4, 0x3e, + 0xed, 0x7d, 0x25, 0x57, 0x72, 0x21, 0x7f, 0x45, 0x2c, 0x65, 0x3d, 0x11, 0xcc, 0xa2, 0x41, 0x5a, + 0xa4, 0xf2, 0xc3, 0xd8, 0xc7, 0xc5, 0xd6, 0x99, 0xe3, 0x91, 0x60, 0x50, 0xf7, 0x3b, 0x6d, 0x61, + 0x60, 0x75, 0x97, 0x70, 0x9c, 0x86, 0xaa, 0xcf, 0x42, 0x05, 0x3d, 0x8f, 0x3b, 0x2e, 0x99, 0x02, + 0x3c, 0xfa, 0x3f, 0x00, 0xb3, 0xce, 0x88, 0x8b, 0x27, 0x71, 0xfa, 0xaf, 0x1a, 0x58, 0xde, 0x39, + 0xde, 0x3b, 0xa0, 0x36, 0x81, 0x5f, 0x82, 0xbc, 0xc8, 0xc7, 0xc6, 0x1c, 0x97, 0xb4, 0x4d, 0xad, + 0x56, 0x78, 0x70, 0xdf, 0x88, 0xcf, 0x69, 0x44, 0x6b, 0xf8, 0x9d, 0xb6, 0x30, 0x30, 0x43, 0x78, + 0x1b, 0xfd, 0x6d, 0xe3, 0xf0, 0xf4, 0x29, 0xb1, 0x78, 0x8b, 0x70, 0x6c, 0xc2, 0xf3, 0x61, 0x75, + 0x21, 0x1c, 0x56, 0x41, 0x6c, 0x43, 0x23, 0x56, 0xb8, 0x0b, 0xb2, 0xcc, 0x27, 0x56, 0x69, 0x51, + 0xb2, 0xeb, 0x46, 0xaa, 0x0a, 0x86, 0xca, 0xe7, 0xd8, 0x27, 0x96, 0xb9, 0xaa, 0xf8, 0xb2, 0x62, + 0x85, 0x24, 0x5a, 0xff, 0x57, 0x03, 0x6b, 0xca, 0x67, 0x37, 0x70, 0xfa, 0x24, 0x80, 0x9b, 0x20, + 0xeb, 0x61, 0x97, 0xc8, 0xac, 0x57, 0x62, 0xcc, 0x01, 0x76, 0x09, 0x92, 0x3b, 0xf0, 0x75, 0xb0, + 0xe4, 0x51, 0x9b, 0xec, 0xed, 0xca, 0xd8, 0x2b, 0xe6, 0x0b, 0xca, 0x67, 0xe9, 0x40, 0x5a, 0x91, + 0xda, 0x85, 0x0f, 0xc1, 0x2a, 0xa7, 0x3e, 0xed, 0xd2, 0xf6, 0xe0, 0x23, 0x32, 0x60, 0xa5, 0xcc, + 0x66, 0xa6, 0xb6, 0x62, 0x16, 0xc3, 0x61, 0x75, 0xf5, 0x24, 0x61, 0x47, 0x63, 0x5e, 0xf0, 0x73, + 0x50, 0xc0, 0xdd, 0x2e, 0xb5, 0x30, 0xc7, 0xa7, 0x5d, 0x52, 0xca, 0xca, 0xf2, 0xb6, 0x66, 0x94, + 0xf7, 0x84, 0x76, 0x7b, 0x2e, 0x11, 0x71, 0x11, 0x61, 0xb4, 0x17, 0x58, 0x84, 0x99, 0x77, 0xc2, + 0x61, 0xb5, 0xd0, 0x88, 0x29, 0x50, 0x92, 0x4f, 0xff, 0x45, 0x03, 0x05, 0x55, 0xf0, 0xbe, 0xc3, + 0x38, 0xfc, 0x6c, 0x4a, 0x28, 0x63, 0x3e, 0xa1, 0x04, 0x5a, 0xca, 0x54, 0x54, 0xe5, 0xe7, 0xaf, + 0x2c, 0x09, 0x91, 0x76, 0x40, 0xce, 0xe1, 0xc4, 0x65, 0xa5, 0xc5, 0xcd, 0x4c, 0xad, 0xf0, 0xa0, + 0x72, 0xbd, 0x4a, 0xe6, 0x9a, 0xa2, 0xca, 0xed, 0x09, 0x10, 0x8a, 0xb0, 0xfa, 0x17, 0xa3, 0x8c, + 0x85, 0x70, 0xf0, 0x10, 0x2c, 0xdb, 0x52, 0x2a, 0x56, 0xd2, 0x24, 0xeb, 0x6b, 0xd7, 0xb3, 0x46, + 0xba, 0x9a, 0x77, 0x14, 0xf7, 0x72, 0xb4, 0x66, 0xe8, 0x8a, 0x45, 0xff, 0x61, 0x09, 0xac, 0x1e, + 0x47, 0xb0, 0x9d, 0x2e, 0x66, 0xec, 0x16, 0x9a, 0xf7, 0x5d, 0x50, 0xf0, 0x03, 0xda, 0x77, 0x98, + 0x43, 0x3d, 0x12, 0xa8, 0x3e, 0xba, 0xab, 0x20, 0x85, 0xa3, 0x78, 0x0b, 0x25, 0xfd, 0x60, 0x1b, + 0x00, 0x1f, 0x07, 0xd8, 0x25, 0x5c, 0x54, 0x9f, 0x91, 0xd5, 0xbf, 0x33, 0xa3, 0xfa, 0x64, 
0x45, + 0xc6, 0xd1, 0x08, 0xd5, 0xf4, 0x78, 0x30, 0x88, 0xb3, 0x8b, 0x37, 0x50, 0x82, 0x1a, 0x76, 0xc0, + 0x5a, 0x40, 0xac, 0x2e, 0x76, 0xdc, 0x23, 0xda, 0x75, 0xac, 0x81, 0x6c, 0xc3, 0x15, 0xb3, 0x19, + 0x0e, 0xab, 0x6b, 0x28, 0xb9, 0x71, 0x39, 0xac, 0xde, 0x9f, 0x9e, 0x5c, 0xc6, 0x11, 0x09, 0x98, + 0xc3, 0x38, 0xf1, 0x78, 0xd4, 0xa1, 0x63, 0x18, 0x34, 0xce, 0x2d, 0xee, 0x89, 0x4b, 0x7b, 0x1e, + 0x3f, 0xf4, 0xb9, 0x43, 0x3d, 0x56, 0xca, 0xc5, 0xf7, 0xa4, 0x95, 0xb0, 0xa3, 0x31, 0x2f, 0xb8, + 0x0f, 0x36, 0x44, 0x5f, 0x7f, 0x13, 0x05, 0x68, 0x7e, 0xeb, 0x63, 0x4f, 0x9c, 0x52, 0x69, 0x69, + 0x53, 0xab, 0xe5, 0xcd, 0x52, 0x38, 0xac, 0x6e, 0x34, 0x52, 0xf6, 0x51, 0x2a, 0x0a, 0x7e, 0x02, + 0xd6, 0xfb, 0xd2, 0x64, 0x3a, 0x9e, 0xed, 0x78, 0xed, 0x16, 0xb5, 0x49, 0x69, 0x59, 0x16, 0xbd, + 0x15, 0x0e, 0xab, 0xeb, 0x4f, 0x26, 0x37, 0x2f, 0xd3, 0x8c, 0x68, 0x9a, 0x04, 0x7e, 0x0d, 0xd6, + 0x65, 0x44, 0x62, 0xab, 0x4b, 0xef, 0x10, 0x56, 0xca, 0x4b, 0xe9, 0x6a, 0x49, 0xe9, 0xc4, 0xd1, + 0x09, 0xdd, 0xae, 0x46, 0xc3, 0x31, 0xe9, 0x12, 0x8b, 0xd3, 0xe0, 0x84, 0x04, 0xae, 0xf9, 0x8a, + 0xd2, 0x6b, 0xbd, 0x31, 0x49, 0x85, 0xa6, 0xd9, 0xcb, 0xef, 0x83, 0x3b, 0x13, 0x82, 0xc3, 0x22, + 0xc8, 0x74, 0xc8, 0x20, 0x1a, 0x6a, 0x48, 0xfc, 0x84, 0x1b, 0x20, 0xd7, 0xc7, 0xdd, 0x1e, 0x89, + 0x9a, 0x0f, 0x45, 0x8b, 0xf7, 0x16, 0x1f, 0x6b, 0xfa, 0x6f, 0x1a, 0x28, 0x26, 0xbb, 0xe7, 0x16, + 0xe6, 0xc4, 0x87, 0xe3, 0x73, 0xe2, 0xd5, 0x39, 0x7a, 0x7a, 0xc6, 0xb0, 0xf8, 0x79, 0x11, 0x14, + 0x23, 0x5d, 0x1a, 0x9c, 0x63, 0xeb, 0xcc, 0x25, 0x1e, 0xbf, 0x85, 0x0b, 0xdd, 0x1a, 0x7b, 0x8d, + 0xde, 0xba, 0x76, 0x5c, 0xc7, 0x89, 0xcd, 0x7a, 0x96, 0xe0, 0xc7, 0x60, 0x89, 0x71, 0xcc, 0x7b, + 0xe2, 0x92, 0x0b, 0xc2, 0x7b, 0xf3, 0x12, 0x4a, 0x50, 0xfc, 0x22, 0x45, 0x6b, 0xa4, 0xc8, 0xf4, + 0xdf, 0x35, 0xb0, 0x31, 0x09, 0xb9, 0x05, 0x75, 0xf7, 0xc7, 0xd5, 0x7d, 0x63, 0xce, 0x62, 0x66, + 0x28, 0xfc, 0xa7, 0x06, 0x5e, 0x9a, 0xaa, 0x5b, 0xbe, 0x7d, 0x62, 0x26, 0xf8, 0x13, 0x93, 0xe7, + 0x20, 0x7e, 0xcb, 0xe5, 0x4c, 0x38, 0x4a, 0xd9, 0x47, 0xa9, 0x28, 0xf8, 0x14, 0x14, 0x1d, 0xaf, + 0xeb, 0x78, 0x24, 0xb2, 0x1d, 0xc7, 0xfa, 0xa6, 0x5e, 0xdc, 0x49, 0x66, 0x29, 0xee, 0x46, 0x38, + 0xac, 0x16, 0xf7, 0x26, 0x58, 0xd0, 0x14, 0xaf, 0xfe, 0x47, 0x8a, 0x32, 0xf2, 0xb5, 0x7b, 0x1b, + 0xe4, 0xb1, 0xb4, 0x90, 0x40, 0x95, 0x31, 0x3a, 0xe9, 0x86, 0xb2, 0xa3, 0x91, 0x87, 0xec, 0x1b, + 0x79, 0x14, 0x2a, 0xd1, 0xb9, 0xfb, 0x46, 0x82, 0x12, 0x7d, 0x23, 0xd7, 0x48, 0x91, 0x89, 0x24, + 0xc4, 0x37, 0x8d, 0x3c, 0xcb, 0xcc, 0x78, 0x12, 0x07, 0xca, 0x8e, 0x46, 0x1e, 0xfa, 0x3f, 0x99, + 0x14, 0x81, 0x64, 0x03, 0x26, 0xaa, 0xb1, 0x65, 0x35, 0xf9, 0xa9, 0x6a, 0xec, 0x51, 0x35, 0x36, + 0xfc, 0x49, 0x03, 0x10, 0x8f, 0x28, 0x5a, 0x57, 0x0d, 0x1a, 0x75, 0x51, 0xf3, 0x46, 0x57, 0xc2, + 0x68, 0x4c, 0xf1, 0x44, 0x2f, 0x61, 0x59, 0xc5, 0x87, 0xd3, 0x0e, 0x28, 0x25, 0x38, 0xb4, 0x41, + 0x21, 0xb2, 0x36, 0x83, 0x80, 0x06, 0xea, 0x7a, 0xea, 0xd7, 0xe6, 0x22, 0x3d, 0xcd, 0x8a, 0xfc, + 0x2c, 0x8b, 0xa1, 0x97, 0xc3, 0x6a, 0x21, 0xb1, 0x8f, 0x92, 0xb4, 0x22, 0x8a, 0x4d, 0xe2, 0x28, + 0xd9, 0x9b, 0x45, 0xd9, 0x25, 0xb3, 0xa3, 0x24, 0x68, 0xcb, 0x4d, 0xf0, 0xf2, 0x8c, 0x63, 0xb9, + 0xd1, 0x7b, 0xf1, 0xbd, 0x06, 0x92, 0x31, 0xe0, 0x3e, 0xc8, 0x8a, 0xbf, 0x09, 0x6a, 0x90, 0x6c, + 0xcd, 0x37, 0x48, 0x4e, 0x1c, 0x97, 0xc4, 0xa3, 0x50, 0xac, 0x90, 0x64, 0x81, 0x6f, 0x82, 0x65, + 0x97, 0x30, 0x86, 0xdb, 0x2a, 0x72, 0xfc, 0x21, 0xd7, 0x8a, 0xcc, 0xe8, 0x6a, 0x5f, 0x7f, 0x04, + 0xee, 0xa6, 0x7c, 0x10, 0xc3, 0x2a, 0xc8, 0x59, 0xe2, 0xcb, 0x41, 0x26, 0x94, 0x33, 0x57, 0xc4, + 0x44, 0xd9, 0x11, 
0x06, 0x14, 0xd9, 0xcd, 0xda, 0xf9, 0x45, 0x65, 0xe1, 0xd9, 0x45, 0x65, 0xe1, + 0xf9, 0x45, 0x65, 0xe1, 0xbb, 0xb0, 0xa2, 0x9d, 0x87, 0x15, 0xed, 0x59, 0x58, 0xd1, 0x9e, 0x87, + 0x15, 0xed, 0xaf, 0xb0, 0xa2, 0xfd, 0xf8, 0x77, 0x65, 0xe1, 0xd3, 0xc5, 0xfe, 0xf6, 0x7f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x5c, 0x59, 0x23, 0xb9, 0x2c, 0x0e, 0x00, 0x00, +} + +func (m *CSINode) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], *m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ - if *m.AllowVolumeExpansion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + return dAtA[:n], nil +} + +func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Allocatable != nil { + { + size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.TopologyKeys) > 0 { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageClassList) Marshal() (dAtA []byte, err error) { +func (m *CSINodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return offset + 1 + return dAtA[:n], nil } -func (m *StorageClass) Size() (n int) { + +func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Drivers) > 0 { + for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.ReclaimPolicy != nil { - l = len(*m.ReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *StorageClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a + } if m.AllowVolumeExpansion != nil { - n += 2 + i-- + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 } - if m.VolumeBindingMode != nil { - l = len(*m.VolumeBindingMode) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if len(m.AllowedTopologies) > 0 { - for _, e := range m.AllowedTopologies { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) 
+ i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - return n + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageClassList) Size() (n int) { +func (m *StorageClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PersistentVolumeName != nil { + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := 
m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CSINode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSINodeDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Allocatable != nil { + l = m.Allocatable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CSINodeList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINodeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Drivers) > 0 { + for _, e := range m.Drivers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowVolumeExpansion != nil { + n += 2 + } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedTopologies) > 0 { + for _, e := range m.AllowedTopologies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func 
sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CSINode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINode{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINodeDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSINode{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSINodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDrivers := "[]CSINodeDriver{" + for _, f := range this.Drivers { + repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForDrivers += "}" + s := strings.Join([]string{`&CSINodeSpec{`, + `Drivers:` + repeatedStringForDrivers + `,`, + `}`, + }, "") + return s +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + 
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CSINode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: CSINodeDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = &VolumeNodeResources{} + } + if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSINode{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drivers = append(m.Drivers, CSINodeDriver{}) + if err := m.Drivers[len(m.Drivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + } + 
var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) + if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - return n -} -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *StorageClass) String() string { - if this == nil { - return "nil" - } - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) - } - mapStringForParameters += "}" - s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, - `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *StorageClass) Unmarshal(dAtA []byte) error { +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -361,7 +2842,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -369,15 +2850,15 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeAttachmentList: 
wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -389,7 +2870,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -398,18 +2879,21 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -419,26 +2903,84 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Provisioner = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -448,19 +2990,30 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -470,12 +3023,86 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -485,76 +3112,29 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Parameters == nil { - m.Parameters = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Attacher = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -564,25 +3144,28 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) - m.ReclaimPolicy = &s + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -594,7 +3177,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -604,14 +3187,70 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + m.NodeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -623,18 +3262,17 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.AllowVolumeExpansion = &b - case 7: + m.Attached = bool(v != 0) + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -644,25 +3282,158 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := VolumeBindingMode(dAtA[iNdEx:postIndex]) - m.VolumeBindingMode = &s + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return 
ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -674,7 +3445,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -683,11 +3454,16 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) - if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -700,6 +3476,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -712,7 +3491,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *StorageClassList) Unmarshal(dAtA []byte) error { +func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -727,7 +3506,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -735,15 +3514,15 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: 
illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -755,7 +3534,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -764,18 +3543,21 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -785,23 +3567,97 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ 
-811,6 +3667,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -877,10 +3736,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -909,6 +3771,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -927,52 +3792,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0x8e, 0x93, 0x9b, 0xde, 0x74, 0xd2, 0xea, 0x26, 0xbe, 0xbd, 0x92, 0x6f, 0x16, 0x4e, 0x54, - 0x36, 0x11, 0x12, 0xe3, 0xa6, 0x14, 0x54, 0x21, 0x81, 0x54, 0xa3, 0x4a, 0x20, 0xb5, 0x6a, 0xe4, - 0x56, 0x15, 0x42, 0x2c, 0x98, 0x38, 0x07, 0x77, 0x88, 0xed, 0x31, 0x33, 0x63, 0x43, 0x76, 0xbc, - 0x00, 0x12, 0xcf, 0xc3, 0x13, 0x74, 0xd9, 0x65, 0x57, 0x16, 0x35, 0x6f, 0xd1, 0x15, 0xf2, 0x0f, - 0x8d, 0x9b, 0x04, 0xd1, 0xdd, 0xcc, 0x77, 0xbe, 0xef, 0x3b, 0x33, 0xe7, 0x07, 0x3d, 0x9b, 0xec, - 0x0a, 0x4c, 0x99, 0x31, 0x09, 0x47, 0xc0, 0x7d, 0x90, 0x20, 0x8c, 0x08, 0xfc, 0x31, 0xe3, 0x46, - 0x11, 0x20, 0x01, 0x35, 0x84, 0x64, 0x9c, 0x38, 0x60, 0x44, 0x03, 0xc3, 0x01, 0x1f, 0x38, 0x91, - 0x30, 0xc6, 0x01, 0x67, 0x92, 0xa9, 0xff, 0xe5, 0x34, 0x4c, 0x02, 0x8a, 0x0b, 0x1a, 0x8e, 0x06, - 0x9d, 0x07, 0x0e, 0x95, 0x67, 0xe1, 0x08, 0xdb, 0xcc, 0x33, 0x1c, 0xe6, 0x30, 0x23, 0x63, 0x8f, - 0xc2, 0x77, 0xd9, 0x2d, 0xbb, 0x64, 0xa7, 0xdc, 0xa5, 0xb3, 0x59, 0x4a, 0x66, 0x33, 0xbe, 0x2c, - 0x53, 0x67, 0x67, 0xc6, 0xf1, 0x88, 0x7d, 0x46, 0x7d, 0xe0, 0x53, 0x23, 0x98, 0x38, 0x29, 0x20, - 0x0c, 0x0f, 0x24, 0x59, 0xa6, 0x32, 0x7e, 0xa7, 0xe2, 0xa1, 0x2f, 0xa9, 0x07, 0x0b, 0x82, 0xc7, - 0x7f, 0x12, 0x08, 0xfb, 0x0c, 0x3c, 0x32, 0xaf, 0xdb, 0xfc, 0xb2, 0x82, 0xd6, 0x8e, 0xf3, 0x02, - 0x3c, 0x77, 0x89, 0x10, 0xea, 0x5b, 0xd4, 0x48, 0x1f, 0x35, 0x26, 0x92, 0x68, 0x4a, 0x4f, 0xe9, - 0x37, 0xb7, 0xb7, 0xf0, 0xac, 0x58, 0x37, 0xde, 0x38, 0x98, 0x38, 0x29, 0x20, 0x70, 0xca, 0xc6, - 0xd1, 0x00, 0x1f, 0x8d, 0xde, 0x83, 0x2d, 0x0f, 0x41, 0x12, 0x53, 0x3d, 0x8f, 0xbb, 0x95, 0x24, - 0xee, 0xa2, 0x19, 0x66, 0xdd, 0xb8, 0xaa, 0x8f, 0x50, 0x33, 0xe0, 0x2c, 0xa2, 0x82, 0x32, 0x1f, - 0xb8, 0x56, 0xed, 0x29, 0xfd, 0x55, 0xf3, 0xdf, 0x42, 0xd2, 0x1c, 0xce, 0x42, 0x56, 0x99, 0xa7, - 0x3a, 0x08, 0x05, 0x84, 0x13, 0x0f, 0x24, 0x70, 0xa1, 0xd5, 0x7a, 0xb5, 0x7e, 0x73, 0xfb, 0x21, - 0x5e, 0xda, 0x47, 0x5c, 0xfe, 0x11, 0x1e, 0xde, 0xa8, 0xf6, 0x7d, 0xc9, 0xa7, 0xb3, 0xd7, 0xcd, - 0x02, 0x56, 0xc9, 0x5a, 0x9d, 0xa0, 0x75, 0x0e, 0xb6, 0x4b, 0xa8, 0x37, 0x64, 0x2e, 0xb5, 0xa7, - 0xda, 0x5f, 0xd9, 0x0b, 0xf7, 0x93, 0xb8, 0xbb, 0x6e, 0x95, 0x03, 0xd7, 0x71, 0x77, 0x6b, 0x71, - 0x02, 0xf0, 0x10, 0xb8, 0xa0, 0x42, 0x82, 0x2f, 0x4f, 0x99, 0x1b, 0x7a, 0x70, 0x4b, 0x63, 0xdd, - 0xf6, 0x56, 0x77, 0xd0, 0x9a, 0xc7, 0x42, 0x5f, 0x1e, 0x05, 0x92, 0x32, 0x5f, 
0x68, 0xf5, 0x5e, - 0xad, 0xbf, 0x6a, 0xb6, 0x92, 0xb8, 0xbb, 0x76, 0x58, 0xc2, 0xad, 0x5b, 0x2c, 0xf5, 0x00, 0x6d, - 0x10, 0xd7, 0x65, 0x1f, 0xf3, 0x04, 0xfb, 0x9f, 0x02, 0xe2, 0xa7, 0x55, 0xd2, 0x56, 0x7a, 0x4a, - 0xbf, 0x61, 0x6a, 0x49, 0xdc, 0xdd, 0xd8, 0x5b, 0x12, 0xb7, 0x96, 0xaa, 0xd4, 0x57, 0xa8, 0x1d, - 0x65, 0x90, 0x49, 0xfd, 0x31, 0xf5, 0x9d, 0x43, 0x36, 0x06, 0xed, 0xef, 0xec, 0xd3, 0xf7, 0x93, - 0xb8, 0xdb, 0x3e, 0x9d, 0x0f, 0x5e, 0x2f, 0x03, 0xad, 0x45, 0x13, 0xf5, 0x03, 0x6a, 0x67, 0x19, - 0x61, 0x7c, 0xc2, 0x02, 0xe6, 0x32, 0x87, 0x82, 0xd0, 0x1a, 0x59, 0xeb, 0xfa, 0xe5, 0xd6, 0xa5, - 0xa5, 0x4b, 0xfb, 0x56, 0xb0, 0xa6, 0xc7, 0xe0, 0x82, 0x2d, 0x19, 0x3f, 0x01, 0xee, 0x99, 0xff, - 0x17, 0xfd, 0x6a, 0xef, 0xcd, 0x5b, 0x59, 0x8b, 0xee, 0x9d, 0xa7, 0xe8, 0x9f, 0xb9, 0x86, 0xab, - 0x2d, 0x54, 0x9b, 0xc0, 0x34, 0x9b, 0xe6, 0x55, 0x2b, 0x3d, 0xaa, 0x1b, 0xa8, 0x1e, 0x11, 0x37, - 0x84, 0x7c, 0xf8, 0xac, 0xfc, 0xf2, 0xa4, 0xba, 0xab, 0x6c, 0x7e, 0x53, 0x50, 0xab, 0x3c, 0x3d, - 0x07, 0x54, 0x48, 0xf5, 0xcd, 0xc2, 0x4e, 0xe0, 0xbb, 0xed, 0x44, 0xaa, 0xce, 0x36, 0xa2, 0x55, - 0xfc, 0xa1, 0xf1, 0x0b, 0x29, 0xed, 0xc3, 0x0b, 0x54, 0xa7, 0x12, 0x3c, 0xa1, 0x55, 0xb3, 0xc2, - 0xdc, 0xbb, 0xc3, 0x4c, 0x9b, 0xeb, 0x85, 0x5f, 0xfd, 0x65, 0xaa, 0xb4, 0x72, 0x03, 0xb3, 0x7f, - 0x7e, 0xa5, 0x57, 0x2e, 0xae, 0xf4, 0xca, 0xe5, 0x95, 0x5e, 0xf9, 0x9c, 0xe8, 0xca, 0x79, 0xa2, - 0x2b, 0x17, 0x89, 0xae, 0x5c, 0x26, 0xba, 0xf2, 0x3d, 0xd1, 0x95, 0xaf, 0x3f, 0xf4, 0xca, 0xeb, - 0x6a, 0x34, 0xf8, 0x19, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x64, 0x41, 0x83, 0x40, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index d1785659..e5004c84 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -29,14 +29,88 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +message CSINode { + // metadata.name must be the Kubernetes node name. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of CSINode + optional CSINodeSpec spec = 2; +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +message CSINodeDriver { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + optional string name = 1; + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". 
When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + optional string nodeID = 2; + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + repeated string topologyKeys = 3; + + // allocatable represents the volume resources of a node that are available for scheduling. + // This field is beta. + // +optional + optional VolumeNodeResources allocatable = 4; +} + +// CSINodeList is a collection of CSINode objects. +message CSINodeList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSINode + repeated CSINode items = 2; +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +message CSINodeSpec { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + repeated CSINodeDriver drivers = 1; +} + // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -80,7 +154,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -88,3 +162,118 @@ message StorageClassList { repeated StorageClass items = 2; } +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. 
+message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string may be logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +message VolumeNodeResources { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is not specified, then the supported number of volumes on this node is unbounded. + // +optional + optional int32 count = 1; +} + diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index c058add8..67493fd0 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -46,6 +46,12 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, &StorageClassList{}, + + &VolumeAttachment{}, + &VolumeAttachmentList{}, + + &CSINode{}, + &CSINodeList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 30e6d6d2..86cb78b6 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -102,3 +102,211 @@ const ( // binding will occur during Pod scheduing. VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" ) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. 
+ Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. 
+ // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string may be logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +type CSINode struct { + metav1.TypeMeta `json:",inline"` + + // metadata.name must be the Kubernetes node name. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of CSINode + Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +type CSINodeSpec struct { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +type CSINodeDriver struct { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` + + // topologyKeys is the list of keys supported by the driver. 
+ // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` + + // allocatable represents the volume resources of a node that are available for scheduling. + // This field is beta. + // +optional + Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +type VolumeNodeResources struct { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is not specified, then the supported number of volumes on this node is unbounded. + // +optional + Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINodeList is a collection of CSINode objects. +type CSINodeList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSINode + Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 23b76e28..d6e3a162 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -27,9 +27,50 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CSINode = map[string]string{ + "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. 
CSINode has an OwnerReference that points to the corresponding node object.", + "metadata": "metadata.name must be the Kubernetes node name.", + "spec": "spec is the specification of CSINode", +} + +func (CSINode) SwaggerDoc() map[string]string { + return map_CSINode +} + +var map_CSINodeDriver = map[string]string{ + "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", + "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", + "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", + "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", +} + +func (CSINodeDriver) SwaggerDoc() map[string]string { + return map_CSINodeDriver +} + +var map_CSINodeList = map[string]string{ + "": "CSINodeList is a collection of CSINode objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSINode", +} + +func (CSINodeList) SwaggerDoc() map[string]string { + return map_CSINodeList +} + +var map_CSINodeSpec = map[string]string{ + "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", + "drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", +} + +func (CSINodeSpec) SwaggerDoc() map[string]string { + return map_CSINodeSpec +} + var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", @@ -45,7 +86,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -53,4 +94,76 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +var map_VolumeNodeResources = map[string]string{ + "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", +} + +func (VolumeNodeResources) SwaggerDoc() map[string]string { + return map_VolumeNodeResources +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 0e850dc3..76255a0a 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -25,6 +25,115 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINode) DeepCopyInto(out *CSINode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. +func (in *CSINode) DeepCopy() *CSINode { + if in == nil { + return nil + } + out := new(CSINode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { + *out = *in + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = new(VolumeNodeResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. +func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { + if in == nil { + return nil + } + out := new(CSINodeDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSINode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. +func (in *CSINodeList) DeepCopy() *CSINodeList { + if in == nil { + return nil + } + out := new(CSINodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { + *out = *in + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]CSINodeDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. +func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { + if in == nil { + return nil + } + out := new(CSINodeSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -89,7 +198,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -117,3 +226,178 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + *out = new(string) + **out = **in + } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(corev1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. 
+func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. +func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { + if in == nil { + return nil + } + out := new(VolumeNodeResources) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go index aa94aff7..6f7ad7e7 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/doc.go +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -14,7 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 507b5c1d..42324352 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -14,36 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto -// DO NOT EDIT! -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto - - It has these top-level messages: - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1alpha1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -56,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{0} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{1} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{2} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + 
xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{3} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{4} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{5} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo func init() { proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") @@ -86,12 +219,69 @@ func init() { proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") 
proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_10f856db1e670dc4) +} + +var fileDescriptor_10f856db1e670dc4 = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0x0d, 0x1f, 0xd1, 0x2a, 0x82, 0x28, 0x48, 0x4e, 0x95, 0x53, + 0x40, 0x74, 0x4d, 0x0a, 0x42, 0x15, 0xb7, 0x58, 0xed, 0xa1, 0xa2, 0x2d, 0x68, 0x8b, 0x38, 0x00, + 0x07, 0x36, 0xf6, 0xe2, 0xb8, 0x89, 0x3f, 0xe4, 0x5d, 0x47, 0xea, 0x8d, 0x13, 0x67, 0x6e, 0xbc, + 0x01, 0xcf, 0x92, 0x1b, 0x15, 0xa7, 0x9e, 0x22, 0x6a, 0xde, 0x82, 0x0b, 0x68, 0xd7, 0x9b, 0xc4, + 0x24, 0x29, 0xb4, 0xbd, 0x79, 0x66, 0x67, 0x7e, 0x33, 0xf3, 0xdf, 0xf1, 0x82, 0x9d, 0xfe, 0x36, + 0x43, 0x6e, 0x60, 0xf4, 0xe3, 0x2e, 0x8d, 0x7c, 0xca, 0x29, 0x33, 0x86, 0xd4, 0xb7, 0x83, 0xc8, + 0x50, 0x07, 0x24, 0x74, 0x0d, 0xc6, 0x83, 0x88, 0x38, 0xd4, 0x18, 0xb6, 0xc9, 0x20, 0xec, 0x91, + 0xb6, 0xe1, 0x50, 0x9f, 0x46, 0x84, 0x53, 0x1b, 0x85, 0x51, 0xc0, 0x03, 0x78, 0x2f, 0x0d, 0x46, + 0x24, 0x74, 0x91, 0x0a, 0x46, 0x93, 0xe0, 0xfa, 0xa6, 0xe3, 0xf2, 0x5e, 0xdc, 0x45, 0x56, 0xe0, + 0x19, 0x4e, 0xe0, 0x04, 0x86, 0xcc, 0xe9, 0xc6, 0x1f, 0xa4, 0x25, 0x0d, 0xf9, 0x95, 0xb2, 0xea, + 0xcd, 0x4c, 0x61, 0x2b, 0x88, 0x44, 0xd5, 0xf9, 0x7a, 0xf5, 0x27, 0xb3, 0x18, 0x8f, 0x58, 0x3d, + 0xd7, 0xa7, 0xd1, 0x89, 0x11, 0xf6, 0x1d, 0xe1, 0x60, 0x86, 0x47, 0x39, 0x59, 0x96, 0x65, 0x5c, + 0x94, 0x15, 0xc5, 0x3e, 0x77, 0x3d, 0xba, 0x90, 0xf0, 0xf4, 0x7f, 0x09, 0xcc, 0xea, 0x51, 0x8f, + 0xcc, 0xe7, 0x35, 0xbf, 0xe6, 0x41, 0xe5, 0x75, 0x30, 0x88, 0x3d, 0xda, 0xe1, 0x9c, 0x58, 0x3d, + 0x8f, 0xfa, 0x1c, 0xbe, 0x07, 0x25, 0xd1, 0x98, 0x4d, 0x38, 0xa9, 0x69, 0x1b, 0x5a, 0xab, 0xbc, + 0xf5, 0x08, 0xcd, 0x64, 0x9b, 0xf2, 0x51, 0xd8, 0x77, 0x84, 0x83, 0x21, 0x11, 0x8d, 0x86, 0x6d, + 0xf4, 0xa2, 0x7b, 0x4c, 0x2d, 0x7e, 0x40, 0x39, 0x31, 0xe1, 0x68, 0xdc, 0xc8, 0x25, 0xe3, 0x06, + 0x98, 0xf9, 0xf0, 0x94, 0x0a, 0x8f, 0x40, 0x91, 0x85, 0xd4, 0xaa, 0xe5, 0x25, 0xbd, 0x8d, 0xfe, + 0x71, 0x29, 0x68, 0xbe, 0xbd, 0xa3, 0x90, 0x5a, 0xe6, 0x0d, 0x85, 0x2f, 0x0a, 0x0b, 0x4b, 0x18, + 0x7c, 0x0b, 0x56, 0x19, 0x27, 0x3c, 0x66, 0xb5, 0x82, 0xc4, 0x3e, 0xbe, 0x1a, 0x56, 0xa6, 0x9a, + 0xb7, 0x14, 0x78, 0x35, 0xb5, 0xb1, 0x42, 0x36, 0x47, 0x1a, 0xa8, 0xce, 0xa7, 0xec, 0xbb, 0x8c, + 0xc3, 0x77, 0x0b, 0x62, 0xa1, 0xcb, 0x89, 0x25, 0xb2, 0xa5, 0x54, 0x15, 0x55, 0xb2, 0x34, 0xf1, + 0x64, 0x84, 0xc2, 0x60, 0xc5, 0xe5, 0xd4, 0x63, 0xb5, 0xfc, 0x46, 0xa1, 0x55, 0xde, 0xda, 0xbc, + 0xd2, 0x48, 0xe6, 0x4d, 0x45, 0x5e, 0xd9, 0x13, 0x0c, 0x9c, 0xa2, 0x9a, 0xdf, 0x35, 0x70, 0x67, + 0x61, 0xfa, 0x20, 0x8e, 0x2c, 0x0a, 0xf7, 0x41, 0x35, 0xa4, 0x11, 0x73, 0x19, 0xa7, 0x3e, 0x4f, + 0x63, 0x0e, 0x89, 0x47, 0xe5, 0x60, 0xeb, 0x66, 0x2d, 0x19, 0x37, 0xaa, 0x2f, 0x97, 0x9c, 0xe3, + 0xa5, 0x59, 0xf0, 0x18, 0x54, 0x5c, 0x7f, 0xe0, 0xfa, 0x34, 0xf5, 0x1d, 0xcd, 0x6e, 0xbc, 0x95, + 0x9d, 0x43, 0xfc, 0x3a, 0x42, 0x90, 0x79, 0xb2, 0xbc, 0xe8, 0x6a, 0x32, 0x6e, 0x54, 0xf6, 0xe6, + 0x28, 0x78, 0x81, 0xdb, 0xfc, 0xb6, 0xe4, 0x7e, 0xc4, 0x01, 0x7c, 0x08, 0x4a, 0x44, 0x7a, 0x68, + 0xa4, 0xc6, 0x98, 0xea, 0xdd, 0x51, 0x7e, 0x3c, 
0x8d, 0x90, 0x3b, 0x24, 0xa5, 0x50, 0x8d, 0x5e, + 0x71, 0x87, 0x64, 0x6a, 0x66, 0x87, 0xa4, 0x8d, 0x15, 0x52, 0xb4, 0xe2, 0x07, 0x76, 0xaa, 0x68, + 0xe1, 0xef, 0x56, 0x0e, 0x95, 0x1f, 0x4f, 0x23, 0x9a, 0xbf, 0x0b, 0x4b, 0xae, 0x49, 0x2e, 0x63, + 0x66, 0x26, 0x5b, 0xce, 0x54, 0x5a, 0x98, 0xc9, 0x9e, 0xce, 0x64, 0xc3, 0x2f, 0x1a, 0x80, 0x64, + 0x8a, 0x38, 0x98, 0x2c, 0x6b, 0xba, 0x51, 0xcf, 0xaf, 0xf1, 0x93, 0xa0, 0xce, 0x02, 0x6d, 0xd7, + 0xe7, 0xd1, 0x89, 0x59, 0x57, 0x5d, 0xc0, 0xc5, 0x00, 0xbc, 0xa4, 0x05, 0x78, 0x0c, 0xca, 0xa9, + 0x77, 0x37, 0x8a, 0x82, 0x48, 0xfd, 0xb6, 0xad, 0x4b, 0x74, 0x24, 0xe3, 0x4d, 0x3d, 0x19, 0x37, + 0xca, 0x9d, 0x19, 0xe0, 0xd7, 0xb8, 0x51, 0xce, 0x9c, 0xe3, 0x2c, 0x5c, 0xd4, 0xb2, 0xe9, 0xac, + 0x56, 0xf1, 0x3a, 0xb5, 0x76, 0xe8, 0xc5, 0xb5, 0x32, 0xf0, 0xfa, 0x2e, 0xb8, 0x7b, 0x81, 0x44, + 0xb0, 0x02, 0x0a, 0x7d, 0x7a, 0x92, 0x6e, 0x22, 0x16, 0x9f, 0xb0, 0x0a, 0x56, 0x86, 0x64, 0x10, + 0xa7, 0x1b, 0xb7, 0x8e, 0x53, 0xe3, 0x59, 0x7e, 0x5b, 0x6b, 0x7e, 0xd2, 0x40, 0xb6, 0x06, 0xdc, + 0x07, 0x45, 0xf1, 0x96, 0xab, 0x67, 0xe6, 0xc1, 0xe5, 0x9e, 0x99, 0x57, 0xae, 0x47, 0x67, 0xcf, + 0xa5, 0xb0, 0xb0, 0xa4, 0xc0, 0xfb, 0x60, 0xcd, 0xa3, 0x8c, 0x11, 0x47, 0x55, 0x36, 0x6f, 0xab, + 0xa0, 0xb5, 0x83, 0xd4, 0x8d, 0x27, 0xe7, 0x26, 0x1a, 0x9d, 0xeb, 0xb9, 0xd3, 0x73, 0x3d, 0x77, + 0x76, 0xae, 0xe7, 0x3e, 0x26, 0xba, 0x36, 0x4a, 0x74, 0xed, 0x34, 0xd1, 0xb5, 0xb3, 0x44, 0xd7, + 0x7e, 0x24, 0xba, 0xf6, 0xf9, 0xa7, 0x9e, 0x7b, 0x53, 0x9a, 0x08, 0xf7, 0x27, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0x45, 0xe3, 0xba, 0xab, 0x07, 0x00, 0x00, +} + func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -99,41 +289,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -141,37 +342,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, 
err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -179,23 +389,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -203,33 +431,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n5, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -237,67 +474,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n6, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n7, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -305,53 +553,48 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n8, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x12 - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -364,6 +607,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -378,16 +624,26 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -400,6 +656,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -423,6 +682,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -433,14 +695,7 @@ func (m *VolumeError) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -450,7 +705,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", 
"VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -461,9 +716,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -474,6 +734,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -507,8 +768,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -518,7 +779,7 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") @@ -547,7 +808,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -575,7 +836,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -584,6 +845,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -605,7 +869,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -614,6 +878,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -635,7 +902,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -644,6 +911,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -660,6 +930,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -687,7 +960,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -715,7 +988,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -724,6 +997,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -745,7 +1021,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -754,6 +1030,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -771,6 +1050,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -798,7 +1080,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -826,7 +1108,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -836,12 +1118,51 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -851,6 +1172,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -878,7 +1202,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -906,7 +1230,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -916,6 +1240,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -935,7 +1262,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -944,6 +1271,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -965,7 +1295,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -975,6 +1305,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -989,6 +1322,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1016,7 +1352,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1044,7 +1380,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1064,7 +1400,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1073,54 +1409,20 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1130,41 +1432,86 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AttachmentMetadata[mapkey] = mapvalue - } else { - var mapvalue string - m.AttachmentMetadata[mapkey] = mapvalue } + 
m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -1180,7 +1527,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1189,6 +1536,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1213,7 +1563,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1222,6 +1572,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1241,6 +1594,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1268,7 +1624,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1296,7 +1652,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1305,6 +1661,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1326,7 +1685,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1336,6 +1695,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1350,6 +1712,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1416,10 +1781,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1448,6 +1816,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1466,55 +1837,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 704 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0x9c, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, - 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0xcd, 0xf3, 0x52, 0xad, 0xa2, 0xe7, 0x89, 0x82, 0xe4, 0x54, - 0x39, 0x15, 0x44, 0xd7, 0xa4, 0x20, 0x54, 0x71, 0x8b, 0xd5, 0x1e, 0x10, 0x6d, 0x41, 0x5b, 0xc4, - 0x01, 0x38, 0xb0, 0xb1, 0xa7, 0x8e, 0x9b, 0xfa, 0x45, 0xbb, 0xeb, 0x48, 0xbd, 0x71, 0xe2, 0xcc, - 0x8d, 0x6f, 0xc0, 0x67, 0xc9, 0x8d, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x16, 0x5c, 0x40, 0x5e, 0x6f, - 0x5e, 0x68, 0x52, 0x68, 0x7b, 0xf3, 0xcc, 0xce, 0xfc, 0x66, 0xe6, 0xbf, 0xb3, 0x46, 0x3b, 0xfd, - 0x6d, 0x41, 0xfc, 0xc8, 0xea, 0x27, 0x5d, 0xe0, 0x21, 0x48, 0x10, 0xd6, 0x00, 0x42, 0x37, 0xe2, - 0x96, 0x3e, 0x60, 0xb1, 0x6f, 0x09, 0x19, 0x71, 0xe6, 0x81, 0x35, 0x68, 0xb3, 0x93, 0xb8, 0xc7, - 0xda, 0x96, 0x07, 0x21, 0x70, 0x26, 0xc1, 0x25, 0x31, 0x8f, 0x64, 0x84, 0xef, 0xe4, 0xc1, 0x84, - 0xc5, 0x3e, 0xd1, 0xc1, 0x64, 0x1c, 0xdc, 0xd8, 0xf4, 0x7c, 0xd9, 0x4b, 0xba, 0xc4, 0x89, 0x02, - 0xcb, 0x8b, 0xbc, 0xc8, 0x52, 0x39, 0xdd, 0xe4, 0x48, 0x59, 0xca, 0x50, 0x5f, 0x39, 0xab, 0xf1, - 0x68, 0x5a, 0x38, 0x60, 0x4e, 0xcf, 0x0f, 0x81, 0x9f, 0x5a, 0x71, 0xdf, 0xcb, 0x1c, 0xc2, 0x0a, - 0x40, 0x32, 0x6b, 0x30, 0xd7, 0x41, 0xc3, 0xba, 0x2a, 0x8b, 0x27, 0xa1, 0xf4, 0x03, 0x98, 0x4b, - 0x78, 0xfc, 0xa7, 0x04, 0xe1, 0xf4, 0x20, 0x60, 0x97, 0xf3, 0x5a, 0x9f, 0x8b, 0x68, 0xed, 0x55, - 0x74, 0x92, 0x04, 0xd0, 0x91, 0x92, 0x39, 0xbd, 0x00, 0x42, 0x89, 0xdf, 0xa1, 0x4a, 0xd6, 0x98, - 0xcb, 0x24, 0xab, 0x1b, 0xeb, 0xc6, 0x46, 0x75, 0xeb, 0x01, 0x99, 0x4a, 0x32, 0xe1, 0x93, 0xb8, - 0xef, 0x65, 0x0e, 0x41, 0xb2, 0x68, 0x32, 0x68, 0x93, 0xe7, 0xdd, 0x63, 0x70, 0xe4, 0x3e, 0x48, - 0x66, 0xe3, 0xe1, 0xa8, 0x59, 0x48, 0x47, 0x4d, 0x34, 0xf5, 0xd1, 0x09, 0x15, 0x1f, 0xa2, 0xb2, - 0x88, 0xc1, 0xa9, 0x17, 0x15, 0xbd, 0x4d, 0x7e, 0x23, 0x38, 0xb9, 0xdc, 0xde, 0x61, 0x0c, 0x8e, - 0xfd, 0x97, 0xc6, 0x97, 0x33, 0x8b, 0x2a, 0x18, 0x7e, 0x83, 0x96, 0x85, 0x64, 0x32, 0x11, 0xf5, - 0x92, 0xc2, 0x3e, 0xbc, 0x19, 0x56, 0xa5, 0xda, 0xff, 0x68, 0xf0, 0x72, 0x6e, 0x53, 0x8d, 0x6c, - 0x0d, 0x0d, 0x54, 0xbb, 0x9c, 0xb2, 0xe7, 0x0b, 0x89, 0xdf, 0xce, 0x89, 0x45, 0xae, 0x27, 0x56, - 0x96, 0xad, 0xa4, 0x5a, 0xd3, 0x25, 0x2b, 0x63, 0xcf, 0x8c, 0x50, 0x14, 0x2d, 0xf9, 0x12, 0x02, - 0x51, 0x2f, 0xae, 0x97, 0x36, 0xaa, 0x5b, 0x9b, 0x37, 0x1a, 0xc9, 0xfe, 0x5b, 0x93, 0x97, 0x9e, - 0x66, 0x0c, 0x9a, 0xa3, 0x5a, 0x47, 0xe8, 0xbf, 0xb9, 0xe1, 0xa3, 0x84, 0x3b, 0x80, 0xf7, 0x50, - 0x2d, 0x06, 0x2e, 0x7c, 0x21, 0x21, 0x94, 0x79, 0xcc, 0x01, 0x0b, 0x40, 0xcd, 0xb5, 0x6a, 0xd7, - 0xd3, 0x51, 0xb3, 0xf6, 0x62, 0xc1, 0x39, 0x5d, 0x98, 0xd5, 0xfa, 0xb2, 0x40, 0xb2, 0xec, 0xba, - 0xf0, 0x7d, 0x54, 0x61, 0xca, 0x03, 0x5c, 0xa3, 0x27, 0x12, 0x74, 0xb4, 0x9f, 0x4e, 0x22, 0xd4, - 0xb5, 0xaa, 0xf6, 0xf4, 0xb6, 0xdc, 0xf0, 0x5a, 0x55, 0xea, 0xcc, 0xb5, 0x2a, 0x9b, 0x6a, 0x64, - 0xd6, 0x4a, 0x18, 0xb9, 0xf9, 0x94, 0xa5, 0x5f, 0x5b, 0x39, 0xd0, 0x7e, 0x3a, 0x89, 0x68, 0xfd, - 0x28, 0x2d, 0x90, 0x4e, 0xed, 0xc7, 0xcc, 0x4c, 0xae, 0x9a, 0xa9, 0x32, 0x37, 0x93, 0x3b, 0x99, - 0xc9, 0xc5, 0x9f, 0x0c, 0x84, 0xd9, 0x04, 0xb1, 0x3f, 0xde, 0x9f, 0xfc, 0x92, 0x9f, 0xdd, 0x62, - 0x6f, 0x49, 0x67, 0x8e, 0xb6, 0x1b, 0x4a, 0x7e, 0x6a, 0x37, 0x74, 0x17, 0x78, 0x3e, 0x80, 0x2e, - 0x68, 0x01, 0x1f, 0xa3, 0x6a, 0xee, 0xdd, 0xe5, 0x3c, 0xe2, 0xfa, 0x25, 0x6d, 0x5c, 0xa3, 0x23, - 0x15, 0x6f, 0x9b, 0xe9, 0xa8, 0x59, 0xed, 0x4c, 0x01, 0xdf, 0x47, 0xcd, 0xea, 0xcc, 0x39, 0x9d, - 0x85, 0x67, 0xb5, 0x5c, 0x98, 0xd6, 0x2a, 0xdf, 0xa6, 0xd6, 0x0e, 0x5c, 0x5d, 0x6b, 0x06, 0xde, - 0xd8, 0x45, 0xff, 0x5f, 0x21, 0x11, 0x5e, 0x43, 0xa5, 0x3e, 0x9c, 0xe6, 0x9b, 0x48, 
0xb3, 0x4f, - 0x5c, 0x43, 0x4b, 0x03, 0x76, 0x92, 0xe4, 0x1b, 0xb7, 0x4a, 0x73, 0xe3, 0x49, 0x71, 0xdb, 0x68, - 0x7d, 0x30, 0xd0, 0x6c, 0x0d, 0xbc, 0x87, 0xca, 0xd9, 0xef, 0x55, 0xbf, 0xfc, 0x7b, 0xd7, 0x7b, - 0xf9, 0x2f, 0xfd, 0x00, 0xa6, 0x7f, 0xb0, 0xcc, 0xa2, 0x8a, 0x82, 0xef, 0xa2, 0x95, 0x00, 0x84, - 0x60, 0x9e, 0xae, 0x6c, 0xff, 0xab, 0x83, 0x56, 0xf6, 0x73, 0x37, 0x1d, 0x9f, 0xdb, 0x64, 0x78, - 0x61, 0x16, 0xce, 0x2e, 0xcc, 0xc2, 0xf9, 0x85, 0x59, 0x78, 0x9f, 0x9a, 0xc6, 0x30, 0x35, 0x8d, - 0xb3, 0xd4, 0x34, 0xce, 0x53, 0xd3, 0xf8, 0x9a, 0x9a, 0xc6, 0xc7, 0x6f, 0x66, 0xe1, 0x75, 0x65, - 0x2c, 0xdc, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0xba, 0xdb, 0x12, 0x1a, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index ccb94754..76019639 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.storage.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -30,11 +31,11 @@ option go_package = "v1alpha1"; // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. -// +// // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -52,7 +53,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -68,6 +69,15 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 964bb5f7..39408857 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -16,7 +16,10 @@ limitations under the License. 
package v1alpha1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -30,7 +33,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -51,7 +54,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -81,7 +84,14 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // Placeholder for *VolumeSource to accommodate inline volumes in pods. + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` } // VolumeAttachmentStatus is the status of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index 32d7dcc5..2e821616 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. 
the external-attacher.", } @@ -40,7 +40,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } @@ -49,7 +49,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index e27c6ff3..3debf9df 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -56,7 +57,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -93,6 +94,11 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(v1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index 8957a4cf..e3e3626e 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -15,6 +15,8 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index fed8c7a6..cd35af34 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -14,40 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" -import strings "strings" -import reflect "reflect" - -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -60,725 +46,2604 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *CSIDriver) Reset() { *m = CSIDriver{} } +func (*CSIDriver) ProtoMessage() {} +func (*CSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{0} +} +func (m *CSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriver.Merge(m, src) +} +func (m *CSIDriver) XXX_Size() int { + return m.Size() +} +func (m *CSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriver.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_CSIDriver proto.InternalMessageInfo -func init() { - proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") - proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") - proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") - proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") - proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") - proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") - proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") - proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") +func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } +func (*CSIDriverList) ProtoMessage() {} +func (*CSIDriverList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{1} } -func (m *StorageClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *CSIDriverList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverList.Merge(m, src) +} +func (m *CSIDriverList) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverList) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverList.DiscardUnknown(m) } -func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo + +func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } +func (*CSIDriverSpec) ProtoMessage() {} +func (*CSIDriverSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{2} +} +func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], 
*m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ - if *m.AllowVolumeExpansion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) - } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *CSIDriverSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverSpec.Merge(m, src) +} +func (m *CSIDriverSpec) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) } -func (m *StorageClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo + +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{3} +} +func (m *CSINode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *CSINode) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINode.Merge(m, src) +} +func (m *CSINode) XXX_Size() int { + return m.Size() +} +func (m *CSINode) XXX_DiscardUnknown() { + xxx_messageInfo_CSINode.DiscardUnknown(m) } -func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_CSINode proto.InternalMessageInfo + +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{4} +} +func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *CSINodeDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeDriver.Merge(m, src) +} +func (m *CSINodeDriver) XXX_Size() int { + return m.Size() +} +func (m *CSINodeDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) } -func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var 
xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo + +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{5} +} +func (m *CSINodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *CSINodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeList.Merge(m, src) +} +func (m *CSINodeList) XXX_Size() int { + return m.Size() +} +func (m *CSINodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeList.DiscardUnknown(m) } -func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_CSINodeList proto.InternalMessageInfo + +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{6} +} +func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *CSINodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeSpec.Merge(m, src) +} +func (m *CSINodeSpec) XXX_Size() int { + return m.Size() +} +func (m *CSINodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{7} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClass proto.InternalMessageInfo + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{8} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n5 - return i, 
nil + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) } -func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{9} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) } -func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{10} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) } -func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{11} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m 
*VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) } -func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{12} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) } -func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{13} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) } -func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n7, err := m.Source.MarshalTo(dAtA[i:]) +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{14} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n7 - dAtA[i] = 0x1a - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) } -func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeError proto.InternalMessageInfo + +func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } +func (*VolumeNodeResources) ProtoMessage() {} +func (*VolumeNodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{15} +} +func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeResources.Merge(m, src) +} +func (m *VolumeNodeResources) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) } -func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.AttachmentMetadata) > 0 { - keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) - for k := range m.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { - dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } +var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver") + proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1beta1.CSIDriverList") + proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1beta1.CSIDriverSpec") + proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1beta1.CSINode") + proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1beta1.CSINodeDriver") + proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList") + proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec") + proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), 
"k8s.io.api.storage.v1beta1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") + proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_7d2980599fd0de80) +} + +var fileDescriptor_7d2980599fd0de80 = []byte{ + // 1344 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0xdb, 0x46, + 0x1b, 0x37, 0x2d, 0x7f, 0x9e, 0xec, 0x44, 0xbe, 0x18, 0xef, 0xab, 0x57, 0x83, 0x64, 0xe8, 0x45, + 0x1b, 0x27, 0x48, 0xc8, 0x24, 0x48, 0x83, 0x20, 0x40, 0x07, 0xd3, 0x31, 0x50, 0x25, 0x96, 0xe3, + 0x9e, 0x8d, 0xa0, 0x08, 0x3a, 0xf4, 0x44, 0x3e, 0x91, 0x19, 0x93, 0x3c, 0x86, 0x3c, 0xa9, 0xd5, + 0xd6, 0xa9, 0x73, 0xd1, 0xa1, 0x7f, 0x41, 0xff, 0x85, 0x16, 0x68, 0x97, 0x8e, 0xcd, 0x54, 0x04, + 0x9d, 0x32, 0x09, 0x0d, 0xbb, 0x76, 0xeb, 0x66, 0x74, 0x28, 0xee, 0x78, 0x12, 0x29, 0x89, 0x8a, + 0xed, 0x0e, 0xde, 0x78, 0xcf, 0xc7, 0xef, 0xf9, 0x7e, 0xee, 0x88, 0xb6, 0x8f, 0xef, 0x47, 0xba, + 0xc3, 0x8c, 0xe3, 0x4e, 0x0b, 0x42, 0x1f, 0x38, 0x44, 0x46, 0x17, 0x7c, 0x9b, 0x85, 0x86, 0x62, + 0xd0, 0xc0, 0x31, 0x22, 0xce, 0x42, 0xda, 0x06, 0xa3, 0x7b, 0xbb, 0x05, 0x9c, 0xde, 0x36, 0xda, + 0xe0, 0x43, 0x48, 0x39, 0xd8, 0x7a, 0x10, 0x32, 0xce, 0x70, 0x25, 0x91, 0xd5, 0x69, 0xe0, 0xe8, + 0x4a, 0x56, 0x57, 0xb2, 0x95, 0x9b, 0x6d, 0x87, 0x1f, 0x75, 0x5a, 0xba, 0xc5, 0x3c, 0xa3, 0xcd, + 0xda, 0xcc, 0x90, 0x2a, 0xad, 0xce, 0x73, 0x79, 0x92, 0x07, 0xf9, 0x95, 0x40, 0x55, 0xea, 0x19, + 0xb3, 0x16, 0x0b, 0x85, 0xcd, 0x71, 0x73, 0x95, 0xbb, 0xa9, 0x8c, 0x47, 0xad, 0x23, 0xc7, 0x87, + 0xb0, 0x67, 0x04, 0xc7, 0x6d, 0x41, 0x88, 0x0c, 0x0f, 0x38, 0xcd, 0xd3, 0x32, 0xa6, 0x69, 0x85, + 0x1d, 0x9f, 0x3b, 0x1e, 0x4c, 0x28, 0xdc, 0x3b, 0x4d, 0x21, 0xb2, 0x8e, 0xc0, 0xa3, 0xe3, 0x7a, + 0xf5, 0x9f, 0x34, 0xb4, 0xbc, 0x7d, 0xd0, 0x78, 0x18, 0x3a, 0x5d, 0x08, 0xf1, 0x67, 0x68, 0x49, + 0x78, 0x64, 0x53, 0x4e, 0xcb, 0xda, 0x86, 0xb6, 0x59, 0xbc, 0x73, 0x4b, 0x4f, 0xd3, 0x35, 0x04, + 0xd6, 0x83, 0xe3, 0xb6, 0x20, 0x44, 0xba, 0x90, 0xd6, 0xbb, 0xb7, 0xf5, 0x27, 0xad, 0x17, 0x60, + 0xf1, 0x26, 0x70, 0x6a, 0xe2, 0x57, 0xfd, 0xda, 0x4c, 0xdc, 0xaf, 0xa1, 0x94, 0x46, 0x86, 0xa8, + 0xf8, 0x31, 0x9a, 0x8b, 0x02, 0xb0, 0xca, 0xb3, 0x12, 0xfd, 0x9a, 0x3e, 0xbd, 0x18, 0xfa, 0xd0, + 0xad, 0x83, 0x00, 0x2c, 0x73, 0x45, 0xc1, 0xce, 0x89, 0x13, 0x91, 0x20, 0xf5, 0x1f, 0x35, 0xb4, + 0x3a, 0x94, 0xda, 0x75, 0x22, 0x8e, 0x3f, 0x9d, 0x08, 0x40, 0x3f, 0x5b, 0x00, 0x42, 0x5b, 0xba, + 0x5f, 0x52, 0x76, 0x96, 0x06, 0x94, 0x8c, 0xf3, 0x8f, 0xd0, 0xbc, 0xc3, 0xc1, 0x8b, 0xca, 0xb3, + 0x1b, 0x85, 0xcd, 0xe2, 0x9d, 0xf7, 0xce, 0xe4, 0xbd, 0xb9, 0xaa, 0x10, 0xe7, 0x1b, 0x42, 0x97, + 0x24, 0x10, 0xf5, 0x3f, 0xb3, 0xbe, 0x8b, 0x98, 0xf0, 0x03, 0x74, 0x89, 0x72, 0x4e, 0xad, 0x23, + 0x02, 0x2f, 0x3b, 0x4e, 0x08, 0xb6, 0x8c, 0x60, 0xc9, 0xc4, 0x71, 0xbf, 0x76, 0x69, 0x6b, 0x84, + 0x43, 0xc6, 0x24, 0x85, 0x6e, 0xc0, 0xec, 0x86, 0xff, 0x9c, 0x3d, 0xf1, 0x9b, 
0xac, 0xe3, 0x73, + 0x99, 0x60, 0xa5, 0xbb, 0x3f, 0xc2, 0x21, 0x63, 0x92, 0xd8, 0x42, 0xeb, 0x5d, 0xe6, 0x76, 0x3c, + 0xd8, 0x75, 0x9e, 0x83, 0xd5, 0xb3, 0x5c, 0x68, 0x32, 0x1b, 0xa2, 0x72, 0x61, 0xa3, 0xb0, 0xb9, + 0x6c, 0x1a, 0x71, 0xbf, 0xb6, 0xfe, 0x34, 0x87, 0x7f, 0xd2, 0xaf, 0x5d, 0xc9, 0xa1, 0x93, 0x5c, + 0xb0, 0xfa, 0x0f, 0x1a, 0x5a, 0xdc, 0x3e, 0x68, 0xec, 0x31, 0x1b, 0x2e, 0xa0, 0xcb, 0x1a, 0x23, + 0x5d, 0x76, 0xf5, 0x94, 0x3a, 0x09, 0xa7, 0xa6, 0xf6, 0xd8, 0x5f, 0x49, 0x9d, 0x84, 0x8c, 0x1a, + 0x92, 0x0d, 0x34, 0xe7, 0x53, 0x0f, 0xa4, 0xeb, 0xcb, 0xa9, 0xce, 0x1e, 0xf5, 0x80, 0x48, 0x0e, + 0x7e, 0x1f, 0x2d, 0xf8, 0xcc, 0x86, 0xc6, 0x43, 0xe9, 0xc0, 0xb2, 0x79, 0x49, 0xc9, 0x2c, 0xec, + 0x49, 0x2a, 0x51, 0x5c, 0x7c, 0x17, 0xad, 0x70, 0x16, 0x30, 0x97, 0xb5, 0x7b, 0x8f, 0xa1, 0x37, + 0xc8, 0x78, 0x29, 0xee, 0xd7, 0x56, 0x0e, 0x33, 0x74, 0x32, 0x22, 0x85, 0x5b, 0xa8, 0x48, 0x5d, + 0x97, 0x59, 0x94, 0xd3, 0x96, 0x0b, 0xe5, 0x39, 0x19, 0xa3, 0xf1, 0xae, 0x18, 0x93, 0x32, 0x09, + 0xe3, 0x04, 0x22, 0xd6, 0x09, 0x2d, 0x88, 0xcc, 0xcb, 0x71, 0xbf, 0x56, 0xdc, 0x4a, 0x71, 0x48, + 0x16, 0xb4, 0xfe, 0xbd, 0x86, 0x8a, 0x2a, 0xea, 0x0b, 0x98, 0xab, 0x8f, 0x46, 0xe7, 0xea, 0xff, + 0x67, 0xa8, 0xd7, 0x94, 0xa9, 0xb2, 0x86, 0x6e, 0xcb, 0x91, 0x3a, 0x44, 0x8b, 0xb6, 0x2c, 0x5a, + 0x54, 0xd6, 0x24, 0xf4, 0xb5, 0x33, 0x40, 0xab, 0xb1, 0xbd, 0xac, 0x0c, 0x2c, 0x26, 0xe7, 0x88, + 0x0c, 0xa0, 0xea, 0xdf, 0x2c, 0xa0, 0x95, 0x83, 0x44, 0x77, 0xdb, 0xa5, 0x51, 0x74, 0x01, 0x0d, + 0xfd, 0x01, 0x2a, 0x06, 0x21, 0xeb, 0x3a, 0x91, 0xc3, 0x7c, 0x08, 0x55, 0x5b, 0x5d, 0x51, 0x2a, + 0xc5, 0xfd, 0x94, 0x45, 0xb2, 0x72, 0xd8, 0x45, 0x28, 0xa0, 0x21, 0xf5, 0x80, 0x8b, 0x14, 0x14, + 0x64, 0x0a, 0xee, 0xbf, 0x2b, 0x05, 0xd9, 0xb0, 0xf4, 0xfd, 0xa1, 0xea, 0x8e, 0xcf, 0xc3, 0x5e, + 0xea, 0x62, 0xca, 0x20, 0x19, 0x7c, 0x7c, 0x8c, 0x56, 0x43, 0xb0, 0x5c, 0xea, 0x78, 0xfb, 0xcc, + 0x75, 0xac, 0x9e, 0x6c, 0xcd, 0x65, 0x73, 0x27, 0xee, 0xd7, 0x56, 0x49, 0x96, 0x71, 0xd2, 0xaf, + 0xdd, 0x9a, 0xbc, 0x3a, 0xf5, 0x7d, 0x08, 0x23, 0x27, 0xe2, 0xe0, 0xf3, 0xa4, 0x61, 0x47, 0x74, + 0xc8, 0x28, 0xb6, 0x98, 0x1d, 0x4f, 0xac, 0xaf, 0x27, 0x01, 0x77, 0x98, 0x1f, 0x95, 0xe7, 0xd3, + 0xd9, 0x69, 0x66, 0xe8, 0x64, 0x44, 0x0a, 0xef, 0xa2, 0x75, 0xd1, 0xe6, 0x9f, 0x27, 0x06, 0x76, + 0xbe, 0x08, 0xa8, 0x2f, 0x52, 0x55, 0x5e, 0x90, 0xdb, 0xb2, 0x2c, 0x76, 0xdd, 0x56, 0x0e, 0x9f, + 0xe4, 0x6a, 0xe1, 0x4f, 0xd0, 0x5a, 0xb2, 0xec, 0x4c, 0xc7, 0xb7, 0x1d, 0xbf, 0x2d, 0x56, 0x5d, + 0x79, 0x51, 0x06, 0x7d, 0x3d, 0xee, 0xd7, 0xd6, 0x9e, 0x8e, 0x33, 0x4f, 0xf2, 0x88, 0x64, 0x12, + 0x04, 0xbf, 0x44, 0x6b, 0xd2, 0x22, 0xd8, 0x6a, 0x11, 0x38, 0x10, 0x95, 0x97, 0x64, 0xfd, 0x36, + 0xb3, 0xf5, 0x13, 0xa9, 0x13, 0x8d, 0x34, 0x58, 0x17, 0x07, 0xe0, 0x82, 0xc5, 0x59, 0x78, 0x08, + 0xa1, 0x67, 0xfe, 0x4f, 0xd5, 0x6b, 0x6d, 0x6b, 0x1c, 0x8a, 0x4c, 0xa2, 0x57, 0x3e, 0x44, 0x97, + 0xc7, 0x0a, 0x8e, 0x4b, 0xa8, 0x70, 0x0c, 0xbd, 0x64, 0xd1, 0x11, 0xf1, 0x89, 0xd7, 0xd1, 0x7c, + 0x97, 0xba, 0x1d, 0x48, 0x3a, 0x90, 0x24, 0x87, 0x07, 0xb3, 0xf7, 0xb5, 0xfa, 0xcf, 0x1a, 0x2a, + 0x65, 0xbb, 0xe7, 0x02, 0xd6, 0x46, 0x73, 0x74, 0x6d, 0x6c, 0x9e, 0xb5, 0xb1, 0xa7, 0xec, 0x8e, + 0xef, 0x66, 0x51, 0x29, 0x29, 0x4e, 0x72, 0xd9, 0x7a, 0xe0, 0xf3, 0x0b, 0x18, 0x6d, 0x32, 0x72, + 0x57, 0xdd, 0x3a, 0x7d, 0x8f, 0xa7, 0xde, 0x4d, 0xbb, 0xb4, 0xf0, 0x33, 0xb4, 0x10, 0x71, 0xca, + 0x3b, 0x62, 0xe6, 0x05, 0xea, 0x9d, 0x73, 0xa1, 0x4a, 0xcd, 0xf4, 0xd2, 0x4a, 0xce, 0x44, 0x21, + 0xd6, 0x7f, 0xd1, 0xd0, 0xfa, 0xb8, 0xca, 0x05, 0x14, 0xfb, 0xe3, 0xd1, 0x62, 0xdf, 0x38, 0x4f, + 0x44, 
0x53, 0x0a, 0xfe, 0x9b, 0x86, 0xfe, 0x33, 0x11, 0xbc, 0xbc, 0x1e, 0xc5, 0x9e, 0x08, 0xc6, + 0xb6, 0xd1, 0x5e, 0x7a, 0xe7, 0xcb, 0x3d, 0xb1, 0x9f, 0xc3, 0x27, 0xb9, 0x5a, 0xf8, 0x05, 0x2a, + 0x39, 0xbe, 0xeb, 0xf8, 0x90, 0xd0, 0x0e, 0xd2, 0x72, 0xe7, 0x0e, 0xf3, 0x38, 0xb2, 0x2c, 0xf3, + 0x7a, 0xdc, 0xaf, 0x95, 0x1a, 0x63, 0x28, 0x64, 0x02, 0xb7, 0xfe, 0x6b, 0x4e, 0x79, 0xe4, 0x5d, + 0x78, 0x03, 0x2d, 0x25, 0x8f, 0x46, 0x08, 0x55, 0x18, 0xc3, 0x74, 0x6f, 0x29, 0x3a, 0x19, 0x4a, + 0xc8, 0x0e, 0x92, 0xa9, 0x50, 0x8e, 0x9e, 0xaf, 0x83, 0xa4, 0x66, 0xa6, 0x83, 0xe4, 0x99, 0x28, + 0x44, 0xe1, 0x89, 0x78, 0x00, 0xc9, 0x84, 0x16, 0x46, 0x3d, 0xd9, 0x53, 0x74, 0x32, 0x94, 0xa8, + 0xff, 0x5d, 0xc8, 0xa9, 0x92, 0x6c, 0xc5, 0x4c, 0x48, 0x83, 0xb7, 0xf2, 0x78, 0x48, 0xf6, 0x30, + 0x24, 0x1b, 0x7f, 0xab, 0x21, 0x4c, 0x87, 0x10, 0xcd, 0x41, 0xab, 0x26, 0xfd, 0xf4, 0xe8, 0xfc, + 0x13, 0xa2, 0x6f, 0x4d, 0x80, 0x25, 0xf7, 0x64, 0x45, 0x39, 0x81, 0x27, 0x05, 0x48, 0x8e, 0x07, + 0xd8, 0x41, 0xc5, 0x84, 0xba, 0x13, 0x86, 0x2c, 0x54, 0x23, 0x7b, 0xf5, 0x74, 0x87, 0xa4, 0xb8, + 0x59, 0x95, 0x0f, 0xb9, 0x54, 0xff, 0xa4, 0x5f, 0x2b, 0x66, 0xf8, 0x24, 0x8b, 0x2d, 0x4c, 0xd9, + 0x90, 0x9a, 0x9a, 0xfb, 0x17, 0xa6, 0x1e, 0xc2, 0x74, 0x53, 0x19, 0xec, 0xca, 0x0e, 0xfa, 0xef, + 0x94, 0x04, 0x9d, 0xeb, 0x5e, 0xf9, 0x4a, 0x43, 0x59, 0x1b, 0x78, 0x17, 0xcd, 0x89, 0xff, 0x59, + 0xb5, 0x61, 0xae, 0x9f, 0x6d, 0xc3, 0x1c, 0x3a, 0x1e, 0xa4, 0x8b, 0x52, 0x9c, 0x88, 0x44, 0xc1, + 0xd7, 0xd0, 0xa2, 0x07, 0x51, 0x44, 0xdb, 0xca, 0x72, 0xfa, 0xea, 0x6b, 0x26, 0x64, 0x32, 0xe0, + 0xd7, 0xef, 0xa1, 0x2b, 0x39, 0xef, 0x68, 0x5c, 0x43, 0xf3, 0x96, 0xfc, 0xe1, 0x12, 0x0e, 0xcd, + 0x9b, 0xcb, 0x62, 0xcb, 0x6c, 0xcb, 0xff, 0xac, 0x84, 0x6e, 0xde, 0x7c, 0xf5, 0xb6, 0x3a, 0xf3, + 0xfa, 0x6d, 0x75, 0xe6, 0xcd, 0xdb, 0xea, 0xcc, 0x97, 0x71, 0x55, 0x7b, 0x15, 0x57, 0xb5, 0xd7, + 0x71, 0x55, 0x7b, 0x13, 0x57, 0xb5, 0xdf, 0xe3, 0xaa, 0xf6, 0xf5, 0x1f, 0xd5, 0x99, 0x67, 0x8b, + 0x2a, 0xdf, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x72, 0xff, 0xde, 0x2e, 0xe4, 0x10, 0x00, 0x00, +} + +func (m *CSIDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n8, err := m.AttachError.MarshalTo(dAtA[i:]) + return dAtA[:n], nil +} + +func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n8 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n9, err := m.DetachError.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeError) Marshal() (dAtA []byte, err error) { +func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } 
return dAtA[:n], nil } -func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n10, err := m.Time.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n10 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + return dAtA[:n], nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 +func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StorageClass) Size() (n int) { + +func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.VolumeLifecycleModes) > 0 { + for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumeLifecycleModes[iNdEx]) + copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if m.ReclaimPolicy != nil { - l = len(*m.ReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.PodInfoOnMount != nil { + i-- + if *m.PodInfoOnMount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - if m.AllowVolumeExpansion != nil { - n += 2 - } - if m.VolumeBindingMode != nil { - l = len(*m.VolumeBindingMode) - n += 1 + l + 
sovGenerated(uint64(l)) - } - if len(m.AllowedTopologies) > 0 { - for _, e := range m.AllowedTopologies { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AttachRequired != nil { + i-- + if *m.AttachRequired { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *StorageClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *CSINode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *VolumeAttachment) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VolumeAttachmentList) Size() (n int) { +func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeAttachmentSource) Size() (n int) { - var l int - _ = l - if m.PersistentVolumeName != nil { - l = len(*m.PersistentVolumeName) - n += 1 + l + sovGenerated(uint64(l)) +func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *VolumeAttachmentSpec) Size() (n int) { - var l int - _ = l - l = len(m.Attacher) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VolumeAttachmentStatus) Size() (n int) { +func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 2 - if len(m.AttachmentMetadata) > 0 { - for k, v := range m.AttachmentMetadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.Allocatable != nil { + { + size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if m.AttachError != nil { - l = m.AttachError.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologyKeys) > 0 { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 
0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - if m.DetachError != nil { - l = m.DetachError.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *VolumeError) Size() (n int) { - var l int - _ = l - l = m.Time.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (this *StorageClass) String() string { - if this == nil { - return "nil" + +func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) + return dAtA[:n], nil +} + +func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Drivers) > 0 { + for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + return len(dAtA) - i, nil +} + +func (m *StorageClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - mapStringForParameters += "}" - s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, - `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *StorageClassList) String() string { - if this == nil { - return "nil" + +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a + } + if m.AllowVolumeExpansion != nil { + i-- + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (this *VolumeAttachment) String() string { - if this == nil { - return "nil" + +func (m *StorageClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *VolumeAttachmentList) String() string { - if this == nil { - return "nil" + +func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (this *VolumeAttachmentSource) String() string { - if this == nil { - return "nil" + +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - s := strings.Join([]string{`&VolumeAttachmentSource{`, - `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *VolumeAttachmentSpec) String() string { - if this == nil { - return "nil" + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - s := strings.Join([]string{`&VolumeAttachmentSpec{`, - `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `}`, - }, "") - return s + i-- + dAtA[i] = 0x1a + { + size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (this *VolumeAttachmentStatus) String() string { - if this == nil { - return "nil" + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) - for k := range this.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - mapStringForAttachmentMetadata := "map[string]string{" - for _, k := range keysForAttachmentMetadata { - mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - mapStringForAttachmentMetadata += "}" - s := strings.Join([]string{`&VolumeAttachmentStatus{`, - `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, - `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, - `}`, - }, "") - return s + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (this *VolumeError) String() string { - if this == nil { - return "nil" + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PersistentVolumeName != nil { + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CSIDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSIDriverList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSIDriverSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AttachRequired != nil { + n += 2 + } + if m.PodInfoOnMount != nil { + n += 2 + } + if len(m.VolumeLifecycleModes) > 0 { + for _, s := range m.VolumeLifecycleModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSINodeDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Allocatable != nil { + l = m.Allocatable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CSINodeList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINodeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Drivers) > 0 { + for _, e := range m.Drivers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + 
n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowVolumeExpansion != nil { + n += 2 + } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedTopologies) > 0 { + for _, e := range m.AllowedTopologies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIDriver{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", 
"CSIDriverSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIDriverList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSIDriver{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSIDriverList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CSIDriverSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIDriverSpec{`, + `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, + `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, + `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, + `}`, + }, "") + return s +} +func (this *CSINode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINode{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINodeDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSINode{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSINodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDrivers := "[]CSINodeDriver{" + for _, f := range this.Drivers { + repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForDrivers += "}" + s := strings.Join([]string{`&CSINodeSpec{`, + `Drivers:` + repeatedStringForDrivers + `,`, + `}`, + }, "") + return s +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: 
%v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := 
range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIDriverList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriverList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriverList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSIDriver{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriverSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriverSpec: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachRequired", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AttachRequired = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodInfoOnMount", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PodInfoOnMount = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = &VolumeNodeResources{} + } + if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *StorageClass) Unmarshal(dAtA []byte) error { +func (m *CSINodeList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -793,7 +2658,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -801,15 +2666,15 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + return fmt.Errorf("proto: CSINodeList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CSINodeList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -821,7 +2686,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -830,18 +2695,21 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -851,24 +2719,169 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Provisioner = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, CSINode{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Drivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drivers = append(m.Drivers, CSINodeDriver{}) + if err := m.Drivers[len(m.Drivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -880,7 +2893,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -889,10 +2902,53 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + m.Provisioner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -902,41 +2958,29 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if msglen < 0 { + return ErrInvalidLengthGenerated } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Parameters == nil { m.Parameters = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -946,41 +2990,86 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) 
+ if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue } + m.Parameters[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -996,7 +3085,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1006,6 +3095,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1026,7 +3118,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1036,6 +3128,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1055,7 +3150,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1076,7 +3171,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) 
<< shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1086,6 +3181,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1106,7 +3204,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1115,10 +3213,13 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1132,6 +3233,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1159,7 +3263,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1187,7 +3291,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1196,6 +3300,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1217,7 +3324,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1226,6 +3333,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1243,6 +3353,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1270,7 +3383,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1298,7 +3411,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1307,6 +3420,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1328,7 +3444,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1337,6 +3453,9 @@ func (m 
*VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1358,7 +3477,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1367,6 +3486,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1383,6 +3505,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1410,7 +3535,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1438,7 +3563,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1447,6 +3572,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1468,7 +3596,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1477,6 +3605,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1494,6 +3625,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1521,7 +3655,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1549,7 +3683,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1559,12 +3693,51 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1574,6 +3747,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1601,7 +3777,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1629,7 +3805,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1639,6 +3815,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1658,7 +3837,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1667,6 +3846,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1688,7 +3870,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1698,6 +3880,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1712,6 +3897,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1739,7 +3927,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1767,7 +3955,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1787,7 +3975,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1796,54 +3984,20 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b 
< 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1853,41 +4007,86 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AttachmentMetadata[mapkey] = mapvalue - } else { - var mapvalue 
string - m.AttachmentMetadata[mapkey] = mapvalue } + m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -1903,7 +4102,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1912,6 +4111,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1936,7 +4138,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1945,6 +4147,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1964,6 +4169,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1991,7 +4199,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2019,7 +4227,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2028,6 +4236,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2049,7 +4260,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2059,6 +4270,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2073,6 +4287,82 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2139,10 +4429,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2171,6 +4464,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2189,73 +4485,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 988 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x4d, 0x86, 0x08, 0x8c, 0x0f, 0x76, 0xe4, 0x0b, 0xa6, - 0x6a, 0x77, 0x9b, 0xa8, 0xa0, 0x08, 0x89, 0x83, 0xb7, 0xe4, 0x00, 0x8a, 0xdb, 0x30, 0x89, 0x2a, - 0x54, 0x71, 0x60, 0xb2, 0xfb, 0x76, 0xb3, 0x78, 0x77, 0x67, 0x99, 0x19, 0x1b, 0x72, 0xe3, 0xc4, - 0x19, 0x71, 0xe0, 0x17, 0xf0, 0x3f, 0x38, 0x92, 0x13, 0xea, 0xb1, 0x27, 0x8b, 0x2c, 0xff, 0x22, - 0xe2, 0x80, 0x66, 0x76, 0x62, 0xaf, 0xbd, 0x0e, 0x6d, 0x7a, 0xe8, 0xcd, 0xef, 0xc7, 0xf3, 0xbc, - 0xdf, 0xb3, 0x46, 0x8f, 0xfa, 0xfb, 0xc2, 0x0e, 0x99, 0xd3, 0x1f, 0x9c, 0x02, 0x4f, 0x40, 0x82, - 0x70, 0x86, 0x90, 0xf8, 0x8c, 0x3b, 0xc6, 0x40, 0xd3, 0xd0, 0x11, 0x92, 0x71, 0x1a, 0x80, 0x33, - 0xdc, 0x3d, 0x05, 0x49, 0x77, 0x9d, 0x00, 0x12, 0xe0, 0x54, 0x82, 0x6f, 0xa7, 0x9c, 0x49, 0x86, - 0x1b, 0xb9, 0xaf, 0x4d, 0xd3, 0xd0, 0x36, 0xbe, 0xb6, 0xf1, 0x6d, 0xdc, 0x0f, 0x42, 0x79, 0x36, - 0x38, 0xb5, 0x3d, 0x16, 0x3b, 0x01, 0x0b, 0x98, 0xa3, 0x21, 0xa7, 0x83, 0xe7, 0x5a, 0xd2, 0x82, - 0xfe, 0x95, 0x53, 0x35, 0xda, 0x85, 0xb0, 0x1e, 0xe3, 0x2a, 0xe6, 0x6c, 0xb8, 0xc6, 0xc3, 0x89, - 0x4f, 0x4c, 0xbd, 0xb3, 0x30, 0x01, 0x7e, 0xee, 0xa4, 0xfd, 0x40, 0x29, 0x84, 0x13, 0x83, 0xa4, - 0xf3, 0x50, 0xce, 0x4d, 0x28, 0x3e, 0x48, 0x64, 0x18, 0x43, 0x09, 0xf0, 0xc9, 0xab, 0x00, 0xc2, - 0x3b, 0x83, 0x98, 0xce, 0xe2, 0xda, 0xbf, 0xae, 0xa0, 0xf5, 0xe3, 0xbc, 0x0b, 0x8f, 0x22, 0x2a, - 0x04, 0xfe, 0x16, 0x55, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x6e, 0xed, 0x58, 0x9d, 0xda, 0xde, 0x03, - 0x7b, 0xd2, 0xb1, 0x31, 0xb7, 0x9d, 0xf6, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x87, 0xbb, 0xf6, - 0x93, 0xd3, 0xef, 0xc0, 0x93, 0x3d, 0x90, 0xd4, 0xc5, 0x17, 0xa3, 0xd6, 0x42, 0x36, 0x6a, 0xa1, - 0x89, 0x8e, 0x8c, 0x59, 0xf1, 0xc7, 0xa8, 0x96, 0x72, 0x36, 0x0c, 0x45, 0xc8, 0x12, 0xe0, 0xf5, - 0xc5, 0x1d, 0xab, 0xb3, 0xe6, 0xbe, 0x6b, 0x20, 0xb5, 0xa3, 0x89, 0x89, 0x14, 0xfd, 0x70, 0x84, - 0x50, 0x4a, 0x39, 0x8d, 0x41, 0x02, 0x17, 0xf5, 0xca, 0x4e, 0xa5, 0x53, 0xdb, 0xdb, 0xb7, 0x6f, - 0x1e, 0xa6, 0x5d, 0x2c, 0xcb, 0x3e, 
0x1a, 0x43, 0x0f, 0x12, 0xc9, 0xcf, 0x27, 0x29, 0x4e, 0x0c, - 0xa4, 0xc0, 0x8f, 0xfb, 0x68, 0x83, 0x83, 0x17, 0xd1, 0x30, 0x3e, 0x62, 0x51, 0xe8, 0x9d, 0xd7, - 0x97, 0x74, 0x9a, 0x07, 0xd9, 0xa8, 0xb5, 0x41, 0x8a, 0x86, 0xab, 0x51, 0xeb, 0x41, 0x79, 0x0d, - 0xec, 0x23, 0xe0, 0x22, 0x14, 0x12, 0x12, 0xf9, 0x94, 0x45, 0x83, 0x18, 0xa6, 0x30, 0x64, 0x9a, - 0x1b, 0x3f, 0x44, 0xeb, 0x31, 0x1b, 0x24, 0xf2, 0x49, 0x2a, 0x43, 0x96, 0x88, 0xfa, 0xf2, 0x4e, - 0xa5, 0xb3, 0xe6, 0x6e, 0x66, 0xa3, 0xd6, 0x7a, 0xaf, 0xa0, 0x27, 0x53, 0x5e, 0xf8, 0x10, 0x6d, - 0xd3, 0x28, 0x62, 0x3f, 0xe4, 0x01, 0x0e, 0x7e, 0x4c, 0x69, 0xa2, 0x5a, 0x55, 0x5f, 0xd9, 0xb1, - 0x3a, 0x55, 0xb7, 0x9e, 0x8d, 0x5a, 0xdb, 0xdd, 0x39, 0x76, 0x32, 0x17, 0x85, 0xbf, 0x46, 0x5b, - 0x43, 0xad, 0x72, 0xc3, 0xc4, 0x0f, 0x93, 0xa0, 0xc7, 0x7c, 0xa8, 0xaf, 0xea, 0xa2, 0xef, 0x66, - 0xa3, 0xd6, 0xd6, 0xd3, 0x59, 0xe3, 0xd5, 0x3c, 0x25, 0x29, 0x93, 0xe0, 0xef, 0xd1, 0x96, 0x8e, - 0x08, 0xfe, 0x09, 0x4b, 0x59, 0xc4, 0x82, 0x10, 0x44, 0xbd, 0xaa, 0xe7, 0xd7, 0x29, 0xce, 0x4f, - 0xb5, 0x4e, 0x2d, 0x92, 0xf1, 0x3a, 0x3f, 0x86, 0x08, 0x3c, 0xc9, 0xf8, 0x09, 0xf0, 0xd8, 0xfd, - 0xc0, 0xcc, 0x6b, 0xab, 0x3b, 0x4b, 0x45, 0xca, 0xec, 0x8d, 0xcf, 0xd0, 0x9d, 0x99, 0x81, 0xe3, - 0x4d, 0x54, 0xe9, 0xc3, 0xb9, 0x5e, 0xe9, 0x35, 0xa2, 0x7e, 0xe2, 0x6d, 0xb4, 0x3c, 0xa4, 0xd1, - 0x00, 0xf2, 0x0d, 0x24, 0xb9, 0xf0, 0xe9, 0xe2, 0xbe, 0xd5, 0xfe, 0xc3, 0x42, 0x9b, 0xc5, 0xed, - 0x39, 0x0c, 0x85, 0xc4, 0xdf, 0x94, 0x0e, 0xc3, 0x7e, 0xbd, 0xc3, 0x50, 0x68, 0x7d, 0x16, 0x9b, - 0xa6, 0x86, 0xea, 0xb5, 0xa6, 0x70, 0x14, 0x3d, 0xb4, 0x1c, 0x4a, 0x88, 0x45, 0x7d, 0xb1, 0xdc, - 0x98, 0xff, 0x5b, 0x6c, 0x77, 0xc3, 0x90, 0x2e, 0x7f, 0xa1, 0xe0, 0x24, 0x67, 0x69, 0xff, 0xbe, - 0x88, 0x36, 0xf3, 0xe1, 0x74, 0xa5, 0xa4, 0xde, 0x59, 0x0c, 0x89, 0x7c, 0x0b, 0xa7, 0x4d, 0xd0, - 0x92, 0x48, 0xc1, 0xd3, 0x1d, 0x9d, 0x66, 0x2f, 0x15, 0x31, 0x9b, 0xdd, 0x71, 0x0a, 0x9e, 0xbb, - 0x6e, 0xd8, 0x97, 0x94, 0x44, 0x34, 0x17, 0x7e, 0x86, 0x56, 0x84, 0xa4, 0x72, 0xa0, 0x6e, 0x5e, - 0xb1, 0xee, 0xdd, 0x8a, 0x55, 0x23, 0xdd, 0x77, 0x0c, 0xef, 0x4a, 0x2e, 0x13, 0xc3, 0xd8, 0xfe, - 0xd3, 0x42, 0xdb, 0xb3, 0x90, 0xb7, 0x30, 0xec, 0xaf, 0xa6, 0x87, 0x7d, 0xef, 0x36, 0x15, 0xdd, - 0x30, 0xf0, 0xe7, 0xe8, 0xbd, 0x52, 0xed, 0x6c, 0xc0, 0x3d, 0x50, 0xcf, 0x44, 0x3a, 0xf3, 0x18, - 0x3d, 0xa6, 0x31, 0xe4, 0x97, 0x90, 0x3f, 0x13, 0x47, 0x73, 0xec, 0x64, 0x2e, 0xaa, 0xfd, 0xd7, - 0x9c, 0x8e, 0xa9, 0x61, 0xe1, 0x7b, 0xa8, 0x4a, 0xb5, 0x06, 0xb8, 0xa1, 0x1e, 0x77, 0xa0, 0x6b, - 0xf4, 0x64, 0xec, 0xa1, 0x87, 0xaa, 0xd3, 0x33, 0xab, 0x72, 0xbb, 0xa1, 0x6a, 0x64, 0x61, 0xa8, - 0x5a, 0x26, 0x86, 0x51, 0x65, 0x92, 0x30, 0x3f, 0x2f, 0xb2, 0x32, 0x9d, 0xc9, 0x63, 0xa3, 0x27, - 0x63, 0x8f, 0xf6, 0xbf, 0x95, 0x39, 0x9d, 0xd3, 0xdb, 0x51, 0x28, 0xc9, 0xd7, 0x25, 0x55, 0x4b, - 0x25, 0xf9, 0xe3, 0x92, 0x7c, 0xfc, 0x9b, 0x85, 0x30, 0x1d, 0x53, 0xf4, 0xae, 0xb7, 0x27, 0x1f, - 0xf1, 0x97, 0xb7, 0x5f, 0x5a, 0xbb, 0x5b, 0x22, 0xcb, 0x3f, 0x5d, 0x0d, 0x93, 0x04, 0x2e, 0x3b, - 0x90, 0x39, 0x19, 0xe0, 0x10, 0xd5, 0x72, 0xed, 0x01, 0xe7, 0x8c, 0x9b, 0x2b, 0xfa, 0xf0, 0xd5, - 0x09, 0x69, 0x77, 0xb7, 0xa9, 0x3e, 0xca, 0xdd, 0x09, 0xfe, 0x6a, 0xd4, 0xaa, 0x15, 0xec, 0xa4, - 0xc8, 0xad, 0x42, 0xf9, 0x30, 0x09, 0xb5, 0xf4, 0x06, 0xa1, 0x3e, 0x87, 0x9b, 0x43, 0x15, 0xb8, - 0x1b, 0x07, 0xe8, 0xfd, 0x1b, 0x1a, 0x74, 0xab, 0xa7, 0xfe, 0x67, 0x0b, 0x15, 0x63, 0xe0, 0x43, - 0xb4, 0xa4, 0xfe, 0x2e, 0x99, 0xa3, 0xbf, 0xfb, 0x7a, 0x47, 0x7f, 0x12, 0xc6, 0x30, 0x79, 0xbb, - 0x94, 0x44, 0x34, 0x0b, 0xfe, 0x08, 0xad, 0xc6, 0x20, 0x04, 
0x0d, 0x4c, 0x64, 0xf7, 0x8e, 0x71, - 0x5a, 0xed, 0xe5, 0x6a, 0x72, 0x6d, 0x77, 0xef, 0x5f, 0x5c, 0x36, 0x17, 0x5e, 0x5c, 0x36, 0x17, - 0x5e, 0x5e, 0x36, 0x17, 0x7e, 0xca, 0x9a, 0xd6, 0x45, 0xd6, 0xb4, 0x5e, 0x64, 0x4d, 0xeb, 0x65, - 0xd6, 0xb4, 0xfe, 0xce, 0x9a, 0xd6, 0x2f, 0xff, 0x34, 0x17, 0x9e, 0xad, 0x9a, 0xbe, 0xfd, 0x17, - 0x00, 0x00, 0xff, 0xff, 0xb4, 0x63, 0x7e, 0xa7, 0x0b, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index ecf53bef..373a154b 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -29,14 +29,181 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// CSIDriver captures information about a Container Storage Interface (CSI) +// volume driver deployed on the cluster. +// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the +// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically +// creates a CSIDriver object representing the driver. +// Kubernetes attach detach controller uses this object to determine whether attach is required. +// Kubelet uses this object to determine whether pod information needs to be passed on mount. +// CSIDriver objects are non-namespaced. +message CSIDriver { + // Standard object metadata. + // metadata.Name indicates the name of the CSI driver that this object + // refers to; it MUST be the same name returned by the CSI GetPluginName() + // call for that driver. + // The driver name must be 63 characters or less, beginning and ending with + // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the CSI Driver. + optional CSIDriverSpec spec = 2; +} + +// CSIDriverList is a collection of CSIDriver objects. +message CSIDriverList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSIDriver + repeated CSIDriver items = 2; +} + +// CSIDriverSpec is the specification of a CSIDriver. +message CSIDriverSpec { + // attachRequired indicates this CSI volume driver requires an attach + // operation (because it implements the CSI ControllerPublishVolume() + // method), and that the Kubernetes attach detach controller should call + // the attach volume interface which checks the volumeattachment status + // and waits until the volume is attached before proceeding to mounting. + // The CSI external-attacher coordinates with CSI volume driver and updates + // the volumeattachment status when the attach operation is complete. + // If the CSIDriverRegistry feature gate is enabled and the value is + // specified to false, the attach operation will be skipped. + // Otherwise the attach operation will be called. + // +optional + optional bool attachRequired = 1; + + // If set to true, podInfoOnMount indicates this CSI volume driver + // requires additional pod information (like podName, podUID, etc.) during + // mount operations. 
+ // If set to false, pod information will not be passed on mount. + // Default is false. + // The CSI driver specifies podInfoOnMount as part of driver deployment. + // If true, Kubelet will pass pod information as VolumeContext in the CSI + // NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information + // passed in as VolumeContext. + // The following VolumeConext will be passed if podInfoOnMount is set to true. + // This list might grow, but the prefix will be used. + // "csi.storage.k8s.io/pod.name": pod.Name + // "csi.storage.k8s.io/pod.namespace": pod.Namespace + // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. + // +optional + optional bool podInfoOnMount = 2; + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + repeated string volumeLifecycleModes = 3; +} + +// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. +// See the release notes for more information. +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +message CSINode { + // metadata.name must be the Kubernetes node name. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of CSINode + optional CSINodeSpec spec = 2; +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +message CSINodeDriver { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. 
+ optional string name = 1; + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + optional string nodeID = 2; + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + repeated string topologyKeys = 3; + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + optional VolumeNodeResources allocatable = 4; +} + +// CSINodeList is a collection of CSINode objects. +message CSINodeList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSINode + repeated CSINode items = 2; +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +message CSINodeSpec { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + repeated CSINodeDriver drivers = 1; +} + // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -80,7 +247,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -90,11 +257,11 @@ message StorageClassList { // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. -// +// // VolumeAttachment objects are non-namespaced. 
message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -112,7 +279,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -128,6 +295,15 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -178,9 +354,19 @@ message VolumeError { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. // +optional optional string message = 2; } +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +message VolumeNodeResources { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is nil, then the supported number of volumes on this node is unbounded. + // +optional + optional int32 count = 1; +} + diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index 06b0f3d5..c270ace5 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -49,6 +49,12 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachment{}, &VolumeAttachmentList{}, + + &CSIDriver{}, + &CSIDriverList{}, + + &CSINode{}, + &CSINodeList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 5702c21b..a8faeb9d 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -115,7 +115,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -136,7 +136,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -166,7 +166,14 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // Placeholder for *VolumeSource to accommodate inline volumes in pods. + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` } // VolumeAttachmentStatus is the status of a VolumeAttachment request. @@ -204,8 +211,230 @@ type VolumeError struct { Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. // +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSIDriver captures information about a Container Storage Interface (CSI) +// volume driver deployed on the cluster. +// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the +// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically +// creates a CSIDriver object representing the driver. +// Kubernetes attach detach controller uses this object to determine whether attach is required. 
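For reviewers: the new inlineVolumeSpec field means a VolumeAttachmentSource carries either a PV name or, under CSIMigration, a translated PersistentVolumeSpec — never both. A rough sketch with hypothetical names, assuming the conventional VolumeAttachmentSpec fields (Attacher, Source, NodeName):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pv-0001" // hypothetical PV name
	attach := storagev1beta1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "attach-pv-0001-node1"},
		Spec: storagev1beta1.VolumeAttachmentSpec{
			Attacher: "csi.example.com", // hypothetical CSI driver name
			NodeName: "node1",
			Source: storagev1beta1.VolumeAttachmentSource{
				// Exactly one of PersistentVolumeName or InlineVolumeSpec may be set.
				PersistentVolumeName: &pvName,
			},
		},
	}

	// With CSIMigration, an in-tree inline volume would instead be translated
	// into a PersistentVolumeSpec and carried in InlineVolumeSpec:
	_ = storagev1beta1.VolumeAttachmentSource{
		InlineVolumeSpec: &v1.PersistentVolumeSpec{},
	}

	fmt.Printf("%s -> %s on %s\n",
		attach.Spec.Attacher, *attach.Spec.Source.PersistentVolumeName, attach.Spec.NodeName)
}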
+// Kubelet uses this object to determine whether pod information needs to be passed on mount. +// CSIDriver objects are non-namespaced. +type CSIDriver struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // metadata.Name indicates the name of the CSI driver that this object + // refers to; it MUST be the same name returned by the CSI GetPluginName() + // call for that driver. + // The driver name must be 63 characters or less, beginning and ending with + // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the CSI Driver. + Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSIDriverList is a collection of CSIDriver objects. +type CSIDriverList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSIDriver + Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CSIDriverSpec is the specification of a CSIDriver. +type CSIDriverSpec struct { + // attachRequired indicates this CSI volume driver requires an attach + // operation (because it implements the CSI ControllerPublishVolume() + // method), and that the Kubernetes attach detach controller should call + // the attach volume interface which checks the volumeattachment status + // and waits until the volume is attached before proceeding to mounting. + // The CSI external-attacher coordinates with CSI volume driver and updates + // the volumeattachment status when the attach operation is complete. + // If the CSIDriverRegistry feature gate is enabled and the value is + // specified to false, the attach operation will be skipped. + // Otherwise the attach operation will be called. + // +optional + AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` + + // If set to true, podInfoOnMount indicates this CSI volume driver + // requires additional pod information (like podName, podUID, etc.) during + // mount operations. + // If set to false, pod information will not be passed on mount. + // Default is false. + // The CSI driver specifies podInfoOnMount as part of driver deployment. + // If true, Kubelet will pass pod information as VolumeContext in the CSI + // NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information + // passed in as VolumeContext. + // The following VolumeConext will be passed if podInfoOnMount is set to true. + // This list might grow, but the prefix will be used. + // "csi.storage.k8s.io/pod.name": pod.Name + // "csi.storage.k8s.io/pod.namespace": pod.Namespace + // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. 
+ // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. + // +optional + PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` +} + +// VolumeLifecycleMode is an enumeration of possible usage modes for a volume +// provided by a CSI driver. More modes may be added in the future. +type VolumeLifecycleMode string + +const ( + // VolumeLifecyclePersistent explicitly confirms that the driver implements + // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not + // set. Such volumes are managed in Kubernetes via the persistent volume + // claim mechanism and have a lifecycle that is independent of the pods which + // use them. + VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" + + // VolumeLifecycleEphemeral indicates that the driver can be used for + // ephemeral inline volumes. Such volumes are specified inside the pod + // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have + // a lifecycle that is tied to the lifecycle of the pod. For example, such + // a volume might contain data that gets created specifically for that pod, + // like secrets. + // But how the volume actually gets created and managed is entirely up to + // the driver. It might also use reference counting to share the same volume + // instance among different pods if the CSIVolumeSource of those pods is + // identical. + VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. +// See the release notes for more information. +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. 
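For reviewers: a sketch (illustrative only, not part of the patch) of a CSIDriver object exercising the spec fields above; the driver name is hypothetical and the flag values are only examples of how a deployment might set them.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func boolPtr(b bool) *bool { return &b }

func main() {
	driver := storagev1beta1.CSIDriver{
		// metadata.Name must match what the driver's GetPluginName() returns.
		ObjectMeta: metav1.ObjectMeta{Name: "hostpath.csi.example.com"}, // hypothetical name
		Spec: storagev1beta1.CSIDriverSpec{
			AttachRequired: boolPtr(false), // skip ControllerPublishVolume / VolumeAttachment handling
			PodInfoOnMount: boolPtr(true),  // kubelet passes pod name/namespace/UID as VolumeContext
			VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
				storagev1beta1.VolumeLifecyclePersistent,
				storagev1beta1.VolumeLifecycleEphemeral,
			},
		},
	}
	fmt.Printf("%s: attachRequired=%v podInfoOnMount=%v modes=%v\n",
		driver.Name, *driver.Spec.AttachRequired, *driver.Spec.PodInfoOnMount, driver.Spec.VolumeLifecycleModes)
}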
+// CSINode has an OwnerReference that points to the corresponding node object. +type CSINode struct { + metav1.TypeMeta `json:",inline"` + + // metadata.name must be the Kubernetes node name. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of CSINode + Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +type CSINodeSpec struct { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +type CSINodeDriver struct { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +type VolumeNodeResources struct { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is nil, then the supported number of volumes on this node is unbounded. 
+ // +optional + Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINodeList is a collection of CSINode objects. +type CSINodeList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSINode + Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 044d69f5..53fa666b 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -27,9 +27,81 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CSIDriver = map[string]string{ + "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", + "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the CSI Driver.", +} + +func (CSIDriver) SwaggerDoc() map[string]string { + return map_CSIDriver +} + +var map_CSIDriverList = map[string]string{ + "": "CSIDriverList is a collection of CSIDriver objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSIDriver", +} + +func (CSIDriverList) SwaggerDoc() map[string]string { + return map_CSIDriverList +} + +var map_CSIDriverSpec = map[string]string{ + "": "CSIDriverSpec is the specification of a CSIDriver.", + "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. 
Otherwise the attach operation will be called.", + "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", + "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", +} + +func (CSIDriverSpec) SwaggerDoc() map[string]string { + return map_CSIDriverSpec +} + +var map_CSINode = map[string]string{ + "": "DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. See the release notes for more information. CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", + "metadata": "metadata.name must be the Kubernetes node name.", + "spec": "spec is the specification of CSINode", +} + +func (CSINode) SwaggerDoc() map[string]string { + return map_CSINode +} + +var map_CSINodeDriver = map[string]string{ + "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", + "name": "This is the name of the CSI driver that this object refers to. 
This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", + "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", + "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", +} + +func (CSINodeDriver) SwaggerDoc() map[string]string { + return map_CSINodeDriver +} + +var map_CSINodeList = map[string]string{ + "": "CSINodeList is a collection of CSINode objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSINode", +} + +func (CSINodeList) SwaggerDoc() map[string]string { + return map_CSINodeList +} + +var map_CSINodeSpec = map[string]string{ + "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", + "drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", +} + +func (CSINodeSpec) SwaggerDoc() map[string]string { + return map_CSINodeSpec +} + var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. 
Defaults to Delete.", @@ -45,7 +117,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -55,7 +127,7 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -66,7 +138,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } @@ -75,7 +147,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } @@ -109,11 +181,20 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { return map_VolumeError } +var map_VolumeNodeResources = map[string]string{ + "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. 
The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", +} + +func (VolumeNodeResources) SwaggerDoc() map[string]string { + return map_VolumeNodeResources +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 8096dba9..52433fcd 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,206 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriver) DeepCopyInto(out *CSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver. +func (in *CSIDriver) DeepCopy() *CSIDriver { + if in == nil { + return nil + } + out := new(CSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList. +func (in *CSIDriverList) DeepCopy() *CSIDriverList { + if in == nil { + return nil + } + out := new(CSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { + *out = *in + if in.AttachRequired != nil { + in, out := &in.AttachRequired, &out.AttachRequired + *out = new(bool) + **out = **in + } + if in.PodInfoOnMount != nil { + in, out := &in.PodInfoOnMount, &out.PodInfoOnMount + *out = new(bool) + **out = **in + } + if in.VolumeLifecycleModes != nil { + in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes + *out = make([]VolumeLifecycleMode, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. +func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { + if in == nil { + return nil + } + out := new(CSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSINode) DeepCopyInto(out *CSINode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. +func (in *CSINode) DeepCopy() *CSINode { + if in == nil { + return nil + } + out := new(CSINode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { + *out = *in + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = new(VolumeNodeResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. +func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { + if in == nil { + return nil + } + out := new(CSINodeDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSINode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. +func (in *CSINodeList) DeepCopy() *CSINodeList { + if in == nil { + return nil + } + out := new(CSINodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { + *out = *in + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]CSINodeDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. +func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { + if in == nil { + return nil + } + out := new(CSINodeSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
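For reviewers: the generated deepcopy functions above duplicate nested pointers, which matters for fields like Allocatable.Count. A quick sketch with made-up values showing that mutating the copy leaves the original untouched:

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
)

func main() {
	count := int32(8)
	orig := &storagev1beta1.CSINodeDriver{
		Name:        "csi.example.com", // hypothetical driver name
		NodeID:      "nodeA",
		Allocatable: &storagev1beta1.VolumeNodeResources{Count: &count},
	}

	// DeepCopy duplicates the nested VolumeNodeResources and its Count pointer,
	// so the copy can be mutated without touching the original.
	cp := orig.DeepCopy()
	*cp.Allocatable.Count = 99

	fmt.Println(*orig.Allocatable.Count, *cp.Allocatable.Count) // 8 99
}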
func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -89,7 +289,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -150,7 +350,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -187,6 +387,11 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(v1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } return } @@ -266,3 +471,24 @@ func (in *VolumeError) DeepCopy() *VolumeError { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. +func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { + if in == nil { + return nil + } + out := new(VolumeNodeResources) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index dc6a4c72..435297a8 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - lavalamp @@ -21,4 +23,3 @@ reviewers: - krousey - cjcullen - david-mcmahon -- goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index bcc032df..e53c3e61 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "net/http" + "reflect" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,7 +32,9 @@ import ( const ( // StatusTooManyRequests means the server experienced too many requests within a // given window and that the client must wait to perform the action again. - StatusTooManyRequests = 429 + // DEPRECATED: please use http.StatusTooManyRequests, this will be removed in + // the future version. + StatusTooManyRequests = http.StatusTooManyRequests ) // StatusError is an error intended for consumption by a REST API server; it can also be @@ -67,6 +70,28 @@ func (e *StatusError) DebugError() (string, []interface{}) { return "server response object: %#v", []interface{}{e.ErrStatus} } +// HasStatusCause returns true if the provided error has a details cause +// with the provided type name. 
+func HasStatusCause(err error, name metav1.CauseType) bool { + _, ok := StatusCause(err, name) + return ok +} + +// StatusCause returns the named cause from the provided error if it exists and +// the error is of the type APIStatus. Otherwise it returns false. +func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) { + apierr, ok := err.(APIStatus) + if !ok || apierr == nil || apierr.Status().Details == nil { + return metav1.StatusCause{}, false + } + for _, cause := range apierr.Status().Details.Causes { + if cause.Type == name { + return cause, true + } + } + return metav1.StatusCause{}, false +} + // UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. type UnexpectedObjectError struct { Object runtime.Object @@ -82,7 +107,20 @@ func (u *UnexpectedObjectError) Error() string { func FromObject(obj runtime.Object) error { switch t := obj.(type) { case *metav1.Status: - return &StatusError{*t} + return &StatusError{ErrStatus: *t} + case runtime.Unstructured: + var status metav1.Status + obj := t.UnstructuredContent() + if !reflect.DeepEqual(obj["kind"], "Status") { + break + } + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil { + return err + } + if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" { + break + } + return &StatusError{ErrStatus: status} } return &UnexpectedObjectError{obj} } @@ -170,7 +208,22 @@ func NewConflict(qualifiedResource schema.GroupResource, name string, err error) }} } +// NewApplyConflict returns an error including details on the requests apply conflicts +func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError { + return &StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonConflict, + Details: &metav1.StatusDetails{ + // TODO: Get obj details here? + Causes: causes, + }, + Message: message, + }} +} + // NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. +// DEPRECATED: Please use NewResourceExpired instead. func NewGone(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, @@ -321,12 +374,23 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { func NewTooManyRequestsError(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: StatusTooManyRequests, + Code: http.StatusTooManyRequests, Reason: metav1.StatusReasonTooManyRequests, Message: fmt.Sprintf("Too many requests: %s", message), }} } +// NewRequestEntityTooLargeError returns an error indicating that the request +// entity was too large. +func NewRequestEntityTooLargeError(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusRequestEntityTooLarge, + Reason: metav1.StatusReasonRequestEntityTooLarge, + Message: fmt.Sprintf("Request entity too large: %s", message), + }} +} + // NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. 
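For reviewers: a short sketch of the new conflict/cause helpers added above (NewApplyConflict, StatusCause, HasStatusCause); the cause type string is only illustrative.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cause := metav1.StatusCause{
		Type:    metav1.CauseType("FieldManagerConflict"), // illustrative cause type
		Message: "conflict with \"other-manager\"",
		Field:   ".spec.replicas",
	}
	err := apierrors.NewApplyConflict([]metav1.StatusCause{cause}, "apply failed")

	// HasStatusCause / StatusCause let callers look up a named cause without
	// digging through Status.Details themselves.
	if apierrors.HasStatusCause(err, metav1.CauseType("FieldManagerConflict")) {
		c, _ := apierrors.StatusCause(err, metav1.CauseType("FieldManagerConflict"))
		fmt.Println("conflict on", c.Field, ":", c.Message)
	}
}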
func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { reason := metav1.StatusReasonUnknown @@ -355,7 +419,11 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr case http.StatusNotAcceptable: reason = metav1.StatusReasonNotAcceptable // the server message has details about what types are acceptable - message = serverMessage + if len(serverMessage) == 0 || serverMessage == "unknown" { + message = "the server was unable to respond with a content type that the client supports" + } else { + message = serverMessage + } case http.StatusUnsupportedMediaType: reason = metav1.StatusReasonUnsupportedMediaType // the server message has details about what types are acceptable @@ -513,6 +581,19 @@ func IsTooManyRequests(err error) bool { return false } +// IsRequestEntityTooLargeError determines if err is an error which indicates +// the request entity is too large. +func IsRequestEntityTooLargeError(err error) bool { + if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge { + return true + } + switch t := err.(type) { + case APIStatus: + return t.Status().Code == http.StatusRequestEntityTooLarge + } + return false +} + // IsUnexpectedServerError returns true if the server response was not in the expected API format, // and may be the result of another HTTP actor. func IsUnexpectedServerError(err error) bool { @@ -565,3 +646,46 @@ func ReasonForError(err error) metav1.StatusReason { } return metav1.StatusReasonUnknown } + +// ErrorReporter converts generic errors into runtime.Object errors without +// requiring the caller to take a dependency on meta/v1 (where Status lives). +// This prevents circular dependencies in core watch code. +type ErrorReporter struct { + code int + verb string + reason string +} + +// NewClientErrorReporter will respond with valid v1.Status objects that report +// unexpected server responses. Primarily used by watch to report errors when +// we attempt to decode a response from the server and it is not in the form +// we expect. Because watch is a dependency of the core api, we can't return +// meta/v1.Status in that package and so much inject this interface to convert a +// generic error as appropriate. The reason is passed as a unique status cause +// on the returned status, otherwise the generic "ClientError" is returned. +func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter { + return &ErrorReporter{ + code: code, + verb: verb, + reason: reason, + } +} + +// AsObject returns a valid error runtime.Object (a v1.Status) for the given +// error, using the code and verb of the reporter type. The error is set to +// indicate that this was an unexpected server response. 
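For reviewers: a sketch of the new request-entity-too-large helpers and of ErrorReporter, which converts a client-side error into a *metav1.Status; the verb and reason below are placeholder values.

package main

import (
	"fmt"
	"net/http"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

func main() {
	err := apierrors.NewRequestEntityTooLargeError("limit is 3MiB")
	fmt.Println(apierrors.IsRequestEntityTooLargeError(err)) // true

	// ErrorReporter wraps an arbitrary client-side error into a Status object,
	// e.g. for surfacing decode failures on a watch stream.
	reporter := apierrors.NewClientErrorReporter(http.StatusInternalServerError, "GET", "ClientWatchDecoding")
	obj := reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream"))
	fmt.Printf("%T\n", obj) // *v1.Status
}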
+func (r *ErrorReporter) AsObject(err error) runtime.Object { + status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true) + if status.ErrStatus.Details == nil { + status.ErrStatus.Details = &metav1.StatusDetails{} + } + reason := r.reason + if len(reason) == 0 { + reason = "ClientError" + } + status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: metav1.CauseType(reason), + Message: err.Error(), + }) + return &status.ErrStatus +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS index 5f729ffe..96bccff1 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - smarterclayton @@ -15,11 +17,7 @@ reviewers: - eparis - dims - krousey -- markturansky -- fabioy - resouer - david-mcmahon - mfojtik - jianhuiz -- feihujiang -- ghodss diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index c70b3d2b..50468b53 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -17,30 +17,76 @@ limitations under the License. package meta import ( + "errors" "fmt" "reflect" + "sync" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" ) -// IsListType returns true if the provided Object has a slice called Items +var ( + // isListCache maintains a cache of types that are checked for lists + // which is used by IsListType. + // TODO: remove and replace with an interface check + isListCache = struct { + lock sync.RWMutex + byType map[reflect.Type]bool + }{ + byType: make(map[reflect.Type]bool, 1024), + } +) + +// IsListType returns true if the provided Object has a slice called Items. +// TODO: Replace the code in this check with an interface comparison by +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { - // if we're a runtime.Unstructured, check whether this is a list. - // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object - if unstructured, ok := obj.(runtime.Unstructured); ok { - return unstructured.IsList() + switch t := obj.(type) { + case runtime.Unstructured: + return t.IsList() + } + t := reflect.TypeOf(obj) + + isListCache.lock.RLock() + ok, exists := isListCache.byType[t] + isListCache.lock.RUnlock() + + if !exists { + _, err := getItemsPtr(obj) + ok = err == nil + + // cache only the first 1024 types + isListCache.lock.Lock() + if len(isListCache.byType) < 1024 { + isListCache.byType[t] = ok + } + isListCache.lock.Unlock() } - _, err := GetItemsPtr(obj) - return err == nil + return ok } +var ( + errExpectFieldItems = errors.New("no Items field in this object") + errExpectSliceItems = errors.New("Items field must be a slice of objects") +) + // GetItemsPtr returns a pointer to the list object's Items member. // If 'list' doesn't have an Items member, it's not really a list type // and an error will be returned. // This function will either return a pointer to a slice, or an error, but not both. 
+// TODO: this will be replaced with an interface in the future func GetItemsPtr(list runtime.Object) (interface{}, error) { + obj, err := getItemsPtr(list) + if err != nil { + return nil, fmt.Errorf("%T is not a list: %v", list, err) + } + return obj, nil +} + +// getItemsPtr returns a pointer to the list object's Items member or an error. +func getItemsPtr(list runtime.Object) (interface{}, error) { v, err := conversion.EnforcePtr(list) if err != nil { return nil, err @@ -48,19 +94,19 @@ func GetItemsPtr(list runtime.Object) (interface{}, error) { items := v.FieldByName("Items") if !items.IsValid() { - return nil, fmt.Errorf("no Items field in %#v", list) + return nil, errExpectFieldItems } switch items.Kind() { case reflect.Interface, reflect.Ptr: target := reflect.TypeOf(items.Interface()).Elem() if target.Kind() != reflect.Slice { - return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) + return nil, errExpectSliceItems } return items.Interface(), nil case reflect.Slice: return items.Addr().Interface(), nil default: - return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) + return nil, errExpectSliceItems } } @@ -158,6 +204,19 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { // objectSliceType is the type of a slice of Objects var objectSliceType = reflect.TypeOf([]runtime.Object{}) +// LenList returns the length of this list or 0 if it is not a list. +func LenList(list runtime.Object) int { + itemsPtr, err := GetItemsPtr(list) + if err != nil { + return 0 + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return 0 + } + return items.Len() +} + // SetList sets the given list object's Items member have the elements given in // objects. // Returns an error if list is not a List type (does not have an Items member), diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 1c2a83cf..fa4b7673 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -20,14 +20,12 @@ import ( "fmt" "reflect" - "github.com/golang/glog" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) // errNotList is returned when an object implements the Object style interfaces but not the List style @@ -115,12 +113,12 @@ func Accessor(obj interface{}) (metav1.Object, error) { // AsPartialObjectMetadata takes the metav1 interface and returns a partial object. // TODO: consider making this solely a conversion action. 
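For reviewers: IsListType now caches its reflection-based check per Go type, and LenList is new; a quick sketch using core/v1 PodList, which is assumed to be present in the vendor tree.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
)

func main() {
	list := &v1.PodList{Items: make([]v1.Pod, 3)}

	// LenList returns 0 for non-list objects instead of surfacing an error.
	fmt.Println(meta.IsListType(list)) // true
	fmt.Println(meta.LenList(list))    // 3

	pod := &v1.Pod{}
	fmt.Println(meta.IsListType(pod)) // false
	fmt.Println(meta.LenList(pod))    // 0
}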
-func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata { +func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { switch t := m.(type) { case *metav1.ObjectMeta: - return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t} + return &metav1.PartialObjectMetadata{ObjectMeta: *t} default: - return &metav1beta1.PartialObjectMetadata{ + return &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: m.GetName(), GenerateName: m.GetGenerateName(), @@ -132,12 +130,12 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata CreationTimestamp: m.GetCreationTimestamp(), DeletionTimestamp: m.GetDeletionTimestamp(), DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), - Labels: m.GetLabels(), - Annotations: m.GetAnnotations(), - OwnerReferences: m.GetOwnerReferences(), - Finalizers: m.GetFinalizers(), - ClusterName: m.GetClusterName(), - Initializers: m.GetInitializers(), + Labels: m.GetLabels(), + Annotations: m.GetAnnotations(), + OwnerReferences: m.GetOwnerReferences(), + Finalizers: m.GetFinalizers(), + ClusterName: m.GetClusterName(), + ManagedFields: m.GetManagedFields(), }, } } @@ -607,7 +605,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { var ret []metav1.OwnerReference s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) + klog.Errorf("expect %v to be a pointer to slice", s) return ret } s = s.Elem() @@ -615,7 +613,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) for i := 0; i < s.Len(); i++ { if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - glog.Errorf("extractFromOwnerReference failed: %v", err) + klog.Errorf("extractFromOwnerReference failed: %v", err) return ret } } @@ -625,13 +623,13 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) + klog.Errorf("expect %v to be a pointer to slice", s) } s = s.Elem() newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) for i := 0; i < len(references); i++ { if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - glog.Errorf("setOwnerReference failed: %v", err) + klog.Errorf("setOwnerReference failed: %v", err) return } } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index c430067f..dc774019 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - lavalamp @@ -9,8 +11,6 @@ reviewers: - janetkuo - tallclair - eparis -- jbeda - xiang90 - mbohlool - david-mcmahon -- goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 802f22a6..9fca2e16 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -14,24 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto -// DO NOT EDIT! -/* -Package resource is a generated protocol buffer package. +package resource -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +import ( + fmt "fmt" -It has these top-level messages: - Quantity -*/ -package resource + math "math" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -44,19 +38,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Quantity) Reset() { *m = Quantity{} } -func (*Quantity) ProtoMessage() {} -func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { + return fileDescriptor_612bba87bd70906c, []int{0} +} +func (m *Quantity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantity.Unmarshal(m, b) +} +func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantity.Marshal(b, m, deterministic) +} +func (m *Quantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantity.Merge(m, src) +} +func (m *Quantity) XXX_Size() int { + return xxx_messageInfo_Quantity.Size(m) +} +func (m *Quantity) XXX_DiscardUnknown() { + xxx_messageInfo_Quantity.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantity proto.InternalMessageInfo func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_612bba87bd70906c = []byte{ // 237 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 2c615d51..18a6c7cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -26,10 +26,10 @@ option go_package = "resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. -// +// in addition to String() and AsInt64() accessors. +// // The serialization format is: -// +// // ::= // (Note that may be empty, from the "" case in .) // ::= 0 | 1 | ... | 9 @@ -43,16 +43,16 @@ option go_package = "resource"; // ::= m | "" | k | M | G | T | P | E // (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) 
// ::= "e" | "E" -// +// // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal // places. Numbers larger or more precise will be capped or rounded up. // (E.g.: 0.1m will rounded up to 1m.) // This may be extended in the future if we require larger or smaller quantities. -// +// // When a Quantity is parsed from a string, it will remember the type of suffix // it had, and will use the same type again when it is serialized. -// +// // Before serializing, Quantity will be put in "canonical form". // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: @@ -60,22 +60,22 @@ option go_package = "resource"; // b. No fractional digits will be emitted // c. The exponent (or suffix) is as large as possible. // The sign will be omitted unless the number is negative. -// +// // Examples: // 1.5 will be serialized as "1500m" // 1.5Gi will be serialized as "1536Mi" -// +// // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. -// +// // Non-canonical values will still parse as long as they are well formed, // but will be re-emitted in their canonical form. (So always use canonical // form, or don't diff.) -// +// // This format is intended to make it difficult to use these numbers without // writing some sort of special handling code in the hopes that that will // cause implementors to also use a fixed point implementation. -// +// // +protobuf=true // +protobuf.embed=string // +protobuf.options.marshal=false diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 72d3880c..7f63175d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -194,9 +194,9 @@ func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { } if fraction { if base > 0 { - value += 1 + value++ } else { - value += -1 + value-- } } return value, !fraction diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index b155a62a..516d041d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -29,7 +29,7 @@ import ( // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. // // The serialization format is: // @@ -584,6 +584,12 @@ func (q *Quantity) Neg() { q.d.Dec.Neg(q.d.Dec) } +// Equal checks equality of two Quantities. This is useful for testing with +// cmp.Equal. +func (q Quantity) Equal(v Quantity) bool { + return q.Cmp(v) == 0 +} + // int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation // of most Quantity values. const int64QuantityExpectedBytes = 18 @@ -680,7 +686,7 @@ func NewScaledQuantity(value int64, scale Scale) *Quantity { } } -// Value returns the value of q; any fractional part will be lost. +// Value returns the unscaled value of q rounded up to the nearest integer away from 0. 
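For reviewers: the Quantity changes above add a value-based Equal (so go-cmp's cmp.Equal compares by Cmp == 0 rather than internal representation), clarify the away-from-zero rounding of Value/ScaledValue, and drop Copy in favour of the generated DeepCopy. A small sketch, assuming resource.MustParse and Quantity.DeepCopy behave as in upstream apimachinery:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("1500m")

	fmt.Println(q.MilliValue())                // 1500
	fmt.Println(q.Value())                     // 2, rounded up away from zero
	fmt.Println(q.ScaledValue(resource.Milli)) // 1500

	// Equal compares by value, so two different spellings of the same quantity match.
	fmt.Println(q.Equal(resource.MustParse("1.5"))) // true

	// Copy() was removed from this package; DeepCopy() is the replacement.
	cp := q.DeepCopy()
	fmt.Println(cp.String()) // 1500m
}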
func (q *Quantity) Value() int64 { return q.ScaledValue(0) } @@ -691,7 +697,9 @@ func (q *Quantity) MilliValue() int64 { return q.ScaledValue(Milli) } -// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. +// ScaledValue returns the value of ceil(q / 10^scale). +// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000. +// This could overflow an int64. // To detect overflow, call Value() first and verify the expected magnitude. func (q *Quantity) ScaledValue(scale Scale) int64 { if q.d.Dec == nil { @@ -718,21 +726,3 @@ func (q *Quantity) SetScaled(value int64, scale Scale) { q.d.Dec = nil q.i = int64Amount{value: value, scale: scale} } - -// Copy is a convenience function that makes a deep copy for you. Non-deep -// copies of quantities share pointers and you will regret that. -func (q *Quantity) Copy() *Quantity { - if q.d.Dec == nil { - return &Quantity{ - s: q.s, - i: q.i, - Format: q.Format, - } - } - tmp := &inf.Dec{} - return &Quantity{ - s: q.s, - d: infDecAmount{tmp.Set(q.d.Dec)}, - Format: q.Format, - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index 74dfb4e4..f89ca163 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -19,6 +19,7 @@ package resource import ( "fmt" "io" + "math/bits" "github.com/gogo/protobuf/proto" ) @@ -28,7 +29,7 @@ var _ proto.Sizer = &Quantity{} func (m *Quantity) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) - n, err := m.MarshalTo(data) + n, err := m.MarshalToSizedBuffer(data[:size]) if err != nil { return nil, err } @@ -38,30 +39,40 @@ func (m *Quantity) Marshal() (data []byte, err error) { // MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct // with a single string field. func (m *Quantity) MarshalTo(data []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(data[:size]) +} + +// MarshalToSizedBuffer is a customized version of the generated +// Protobuf unmarshaler for a struct with a single string field. +func (m *Quantity) MarshalToSizedBuffer(data []byte) (int, error) { + i := len(data) _ = i var l int _ = l - data[i] = 0xa - i++ // BEGIN CUSTOM MARSHAL out := m.String() + i -= len(out) + copy(data[i:], out) i = encodeVarintGenerated(data, i, uint64(len(out))) - i += copy(data[i:], out) // END CUSTOM MARSHAL + i-- + data[i] = 0xa - return i, nil + return len(data) - i, nil } func encodeVarintGenerated(data []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } data[offset] = uint8(v) - return offset + 1 + return base } func (m *Quantity) Size() (n int) { @@ -77,14 +88,7 @@ func (m *Quantity) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (bits.Len64(x|1) + 6) / 7 } // Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go deleted file mode 100644 index 673e5621..00000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" -) - -func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *metav1.ListOptions, s conversion.Scope) error { - if err := metav1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - if err := metav1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - out.IncludeUninitialized = in.IncludeUninitialized - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - out.Watch = in.Watch - out.Limit = in.Limit - out.Continue = in.Continue - return nil -} - -func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOptions, out *ListOptions, s conversion.Scope) error { - if err := metav1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - if err := metav1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - out.IncludeUninitialized = in.IncludeUninitialized - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - out.Watch = in.Watch - out.Limit = in.Limit - out.Continue = in.Continue - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go index 1e85c5c4..2741ee2c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1 -package internalversion +package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go index 46b8605f..b56140de 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -21,15 +21,11 @@ import ( metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" ) // GroupName is the group name for this API. const GroupName = "meta.k8s.io" -// Scheme is the registry for any type that adheres to the meta API spec. -var scheme = runtime.NewScheme() - var ( // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. @@ -38,22 +34,16 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Codecs provides access to encoding and decoding for the scheme. 
-var Codecs = serializer.NewCodecFactory(scheme) - // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} -// ParameterCodec handles versioning of objects that are converted to query parameters. -var ParameterCodec = runtime.NewParameterCodec(scheme) - // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // addToGroupVersion registers common meta types into schemas. -func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) error { +func addToGroupVersion(scheme *runtime.Scheme) error { if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { return err } @@ -66,9 +56,6 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) metav1.Convert_Map_string_To_string_To_v1_LabelSelector, metav1.Convert_v1_LabelSelector_To_Map_string_To_string, - - Convert_internalversion_ListOptions_To_v1_ListOptions, - Convert_v1_ListOptions_To_internalversion_ListOptions, ) if err != nil { return err @@ -89,12 +76,12 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &metav1beta1.PartialObjectMetadata{}, &metav1beta1.PartialObjectMetadataList{}, ) - scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion, - &metav1beta1.Table{}, - &metav1beta1.TableOptions{}, - &metav1beta1.PartialObjectMetadata{}, - &metav1beta1.PartialObjectMetadataList{}, - ) + if err := metav1beta1.AddMetaToScheme(scheme); err != nil { + return err + } + if err := metav1.AddMetaToScheme(scheme); err != nil { + return err + } // Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this) scheme.AddUnversionedTypes(SchemeGroupVersion, &metav1.DeleteOptions{}, @@ -107,7 +94,6 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) // Unlike other API groups, meta internal knows about all meta external versions, but keeps // the logic for conversion private. func init() { - if err := addToGroupVersion(scheme, SchemeGroupVersion); err != nil { - panic(err) - } + localSchemeBuilder.Register(addToGroupVersion) + localSchemeBuilder.Register(metav1.RegisterConversions) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index 2f6740d3..8d254416 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -33,11 +33,17 @@ type ListOptions struct { LabelSelector labels.Selector // A selector based on fields FieldSelector fields.Selector - // If true, partially initialized resources are included in the response. - // +optional - IncludeUninitialized bool // If true, watch for changes to this list Watch bool + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. 
+ AllowWatchBookmarks bool // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 18d190b2..35adbca1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -55,16 +55,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*ListOptions)(nil), (*v1.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_internalversion_ListOptions_To_v1_ListOptions(a.(*ListOptions), b.(*v1.ListOptions), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ListOptions_To_internalversion_ListOptions(a.(*v1.ListOptions), b.(*ListOptions), scope) - }); err != nil { - return err - } return nil } @@ -117,8 +107,8 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { return err } - out.IncludeUninitialized = in.IncludeUninitialized out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit @@ -126,6 +116,11 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, return nil } +// Convert_internalversion_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + return autoConvert_internalversion_ListOptions_To_v1_ListOptions(in, out, s) +} + func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err @@ -133,11 +128,16 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { return err } - out.IncludeUninitialized = in.IncludeUninitialized out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue return nil } + +// Convert_v1_ListOptions_To_internalversion_ListOptions is an autogenerated conversion function. 
+func Convert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_internalversion_ListOptions(in, out, s) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 81d85e96..d5e4fc68 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -28,7 +28,7 @@ import ( func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.Object, len(*in)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index cdb125a0..77cfb0c1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - smarterclayton @@ -28,4 +30,3 @@ reviewers: - mqliang - kevin-wangzefeng - jianhuiz -- feihujiang diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 042cd5b9..15b45ffa 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -22,7 +22,7 @@ import ( // IsControlledBy checks if the object has a controllerRef set to the given owner func IsControlledBy(obj Object, owner Object) bool { - ref := GetControllerOf(obj) + ref := GetControllerOfNoCopy(obj) if ref == nil { return false } @@ -31,9 +31,20 @@ func IsControlledBy(obj Object, owner Object) bool { // GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller func GetControllerOf(controllee Object) *OwnerReference { - for _, ref := range controllee.GetOwnerReferences() { - if ref.Controller != nil && *ref.Controller { - return &ref + ref := GetControllerOfNoCopy(controllee) + if ref == nil { + return nil + } + cp := *ref + return &cp +} + +// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +func GetControllerOfNoCopy(controllee Object) *OwnerReference { + refs := controllee.GetOwnerReferences() + for i := range refs { + if refs[i].Controller != nil && *refs[i].Controller { + return &refs[i] } } return nil diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index 5c36f82c..285a41a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -18,6 +18,7 @@ package v1 import ( "fmt" + "net/url" "strconv" "strings" @@ -26,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -35,12 +37,17 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_v1_ListMeta_To_v1_ListMeta, + Convert_v1_DeleteOptions_To_v1_DeleteOptions, + Convert_intstr_IntOrString_To_intstr_IntOrString, + Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString, + Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString, 
Convert_Pointer_v1_Duration_To_v1_Duration, Convert_v1_Duration_To_Pointer_v1_Duration, Convert_Slice_string_To_v1_Time, + Convert_Slice_string_To_Pointer_v1_Time, Convert_v1_Time_To_v1_Time, Convert_v1_MicroTime_To_v1_MicroTime, @@ -76,7 +83,9 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_Slice_string_To_Slice_int32, - Convert_Slice_string_To_v1_DeletionPropagation, + Convert_Slice_string_To_Pointer_v1_DeletionPropagation, + + Convert_Slice_string_To_v1_IncludeObjectPolicy, ) } @@ -192,12 +201,33 @@ func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) e return nil } +// +k8s:conversion-fn=copy-only +func Convert_v1_DeleteOptions_To_v1_DeleteOptions(in, out *DeleteOptions, s conversion.Scope) error { + *out = *in + return nil +} + // +k8s:conversion-fn=copy-only func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { *out = *in return nil } +func Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(in **intstr.IntOrString, out *intstr.IntOrString, s conversion.Scope) error { + if *in == nil { + *out = intstr.IntOrString{} // zero value + return nil + } + *out = **in // copy + return nil +} + +func Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(in *intstr.IntOrString, out **intstr.IntOrString, s conversion.Scope) error { + temp := *in // copy + *out = &temp + return nil +} + // +k8s:conversion-fn=copy-only func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error { // Cannot deep copy these, because time.Time has unexported fields. @@ -228,14 +258,30 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s } // Convert_Slice_string_To_v1_Time allows converting a URL query parameter value -func Convert_Slice_string_To_v1_Time(input *[]string, out *Time, s conversion.Scope) error { +func Convert_Slice_string_To_v1_Time(in *[]string, out *Time, s conversion.Scope) error { str := "" - if len(*input) > 0 { - str = (*input)[0] + if len(*in) > 0 { + str = (*in)[0] } return out.UnmarshalQueryParameter(str) } +func Convert_Slice_string_To_Pointer_v1_Time(in *[]string, out **Time, s conversion.Scope) error { + if in == nil { + return nil + } + str := "" + if len(*in) > 0 { + str = (*in)[0] + } + temp := Time{} + if err := temp.UnmarshalQueryParameter(str); err != nil { + return err + } + *out = &temp + return nil +} + func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { selector, err := labels.Parse(*in) if err != nil { @@ -308,12 +354,53 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio return nil } -// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy -func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error { - if len(*input) > 0 { - *out = DeletionPropagation((*input)[0]) +// Convert_Slice_string_To_Pointer_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy +func Convert_Slice_string_To_Pointer_v1_DeletionPropagation(in *[]string, out **DeletionPropagation, s conversion.Scope) error { + var str string + if len(*in) > 0 { + str = (*in)[0] } else { - *out = "" + str = "" + } + temp := DeletionPropagation(str) + *out = &temp + return nil +} + +// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1_IncludeObjectPolicy(in *[]string, out 
*IncludeObjectPolicy, s conversion.Scope) error { + if len(*in) > 0 { + *out = IncludeObjectPolicy((*in)[0]) + } + return nil +} + +// Convert_url_Values_To_v1_DeleteOptions allows converting a URL to DeleteOptions. +func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { + if err := autoConvert_url_Values_To_v1_DeleteOptions(in, out, s); err != nil { + return err + } + + uid := types.UID("") + if values, ok := (*in)["uid"]; ok && len(values) > 0 { + uid = types.UID(values[0]) + } + + resourceVersion := "" + if values, ok := (*in)["resourceVersion"]; ok && len(values) > 0 { + resourceVersion = values[0] + } + + if len(uid) > 0 || len(resourceVersion) > 0 { + if out.Preconditions == nil { + out.Preconditions = &Preconditions{} + } + if len(uid) > 0 { + out.Preconditions.UID = &uid + } + if len(resourceVersion) > 0 { + out.Preconditions.ResourceVersion = &resourceVersion + } } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go new file mode 100644 index 00000000..8751d052 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func (in *TableRow) DeepCopy() *TableRow { + if in == nil { + return nil + } + + out := new(TableRow) + + if in.Cells != nil { + out.Cells = make([]interface{}, len(in.Cells)) + for i := range in.Cells { + out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) + } + } + + if in.Conditions != nil { + out.Conditions = make([]TableRowCondition, len(in.Conditions)) + for i := range in.Conditions { + in.Conditions[i].DeepCopyInto(&out.Conditions[i]) + } + } + + in.Object.DeepCopyInto(&out.Object) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 61f201cd..7736753d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:conversion-gen=false // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io + package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index 2eaabf07..babe8a8b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -48,3 +48,13 @@ func (d *Duration) UnmarshalJSON(b []byte) error { func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(d.Duration.String()) } + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Duration) OpenAPISchemaFormat() string { return "" } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index b7508f03..31b1d955 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -14,74 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - CreateOptions - DeleteOptions - Duration - ExportOptions - GetOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - Initializer - Initializers - LabelSelector - LabelSelectorRequirement - List - ListMeta - ListOptions - MicroTime - ObjectMeta - OwnerReference - Patch - Preconditions - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - Time - Timestamp - TypeMeta - UpdateOptions - Verbs - WatchEvent -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import time "time" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -95,167 +49,1199 @@ var _ = time.Kitchen // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{0} +} +func (m *APIGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroup.Merge(m, src) +} +func (m *APIGroup) XXX_Size() int { + return m.Size() +} +func (m *APIGroup) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroup.DiscardUnknown(m) +} -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_APIGroup proto.InternalMessageInfo -func (m *APIResource) Reset() { *m = APIResource{} } -func (*APIResource) ProtoMessage() {} -func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{1} +} +func (m *APIGroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupList.Merge(m, src) +} +func (m *APIGroupList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupList.DiscardUnknown(m) +} -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_APIGroupList proto.InternalMessageInfo -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{2} +} +func (m *APIResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResource.Merge(m, src) +} +func (m *APIResource) XXX_Size() int { + return m.Size() +} +func (m *APIResource) XXX_DiscardUnknown() { + xxx_messageInfo_APIResource.DiscardUnknown(m) +} -func (m *CreateOptions) Reset() { *m = CreateOptions{} } -func (*CreateOptions) ProtoMessage() {} -func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var 
xxx_messageInfo_APIResource proto.InternalMessageInfo -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{3} +} +func (m *APIResourceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceList.Merge(m, src) +} +func (m *APIResourceList) XXX_Size() int { + return m.Size() +} +func (m *APIResourceList) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceList.DiscardUnknown(m) +} -func (m *Duration) Reset() { *m = Duration{} } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_APIResourceList proto.InternalMessageInfo -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{4} +} +func (m *APIVersions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersions.Merge(m, src) +} +func (m *APIVersions) XXX_Size() int { + return m.Size() +} +func (m *APIVersions) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersions.DiscardUnknown(m) +} -func (m *GetOptions) Reset() { *m = GetOptions{} } -func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_APIVersions proto.InternalMessageInfo -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *CreateOptions) Reset() { *m = CreateOptions{} } +func (*CreateOptions) ProtoMessage() {} +func (*CreateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{5} +} +func (m *CreateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CreateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateOptions.Merge(m, src) +} +func (m *CreateOptions) XXX_Size() int { + return m.Size() +} +func (m *CreateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CreateOptions.DiscardUnknown(m) +} -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_CreateOptions proto.InternalMessageInfo -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{6} +} +func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeleteOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteOptions.Merge(m, src) +} +func (m *DeleteOptions) XXX_Size() int { + return m.Size() +} +func (m *DeleteOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{7} +} +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{8} +} +func (m *ExportOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExportOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportOptions.Merge(m, src) +} +func (m *ExportOptions) XXX_Size() int { + return m.Size() +} +func (m *ExportOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExportOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportOptions proto.InternalMessageInfo + +func (m *FieldsV1) Reset() { *m = FieldsV1{} } +func (*FieldsV1) ProtoMessage() {} +func (*FieldsV1) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{9} +} +func (m *FieldsV1) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldsV1) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldsV1.Merge(m, src) +} +func (m *FieldsV1) XXX_Size() int { + return m.Size() +} +func (m *FieldsV1) XXX_DiscardUnknown() { + xxx_messageInfo_FieldsV1.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func 
(*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{10} +} +func (m *GetOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GetOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOptions.Merge(m, src) +} +func (m *GetOptions) XXX_Size() int { + return m.Size() +} +func (m *GetOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GetOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_GetOptions proto.InternalMessageInfo + +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{11} +} +func (m *GroupKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupKind.Merge(m, src) +} +func (m *GroupKind) XXX_Size() int { + return m.Size() +} +func (m *GroupKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupKind.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupKind proto.InternalMessageInfo + +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{12} +} +func (m *GroupResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResource.Merge(m, src) +} +func (m *GroupResource) XXX_Size() int { + return m.Size() +} +func (m *GroupResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResource.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupResource proto.InternalMessageInfo + +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{13} +} +func (m *GroupVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersion.Merge(m, src) +} +func (m *GroupVersion) XXX_Size() int { + return m.Size() +} +func (m *GroupVersion) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_cf52fa777ced5367, []int{14} +} +func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func 
(m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src) +} +func (m *GroupVersionForDiscovery) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m) } -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{15} +} +func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionKind.Merge(m, src) +} +func (m *GroupVersionKind) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionKind.DiscardUnknown(m) +} -func (m *Initializer) Reset() { *m = Initializer{} } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo -func (m *Initializers) Reset() { *m = Initializers{} } -func (*Initializers) ProtoMessage() {} -func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{16} +} +func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionResource.Merge(m, src) +} +func (m *GroupVersionResource) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{17} +} +func (m *LabelSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelector.Merge(m, src) +} +func (m *LabelSelector) XXX_Size() int { + return m.Size() +} +func (m *LabelSelector) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelector.DiscardUnknown(m) +} -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptor_cf52fa777ced5367, []int{18} +} +func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorRequirement.Merge(m, src) +} +func (m *LabelSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{19} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} + +var xxx_messageInfo_List proto.InternalMessageInfo + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{20} +} +func (m *ListMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMeta.Merge(m, src) +} +func (m *ListMeta) XXX_Size() int { + return m.Size() +} +func (m *ListMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ListMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMeta proto.InternalMessageInfo + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{21} +} +func (m *ListOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOptions.Merge(m, src) +} +func (m *ListOptions) XXX_Size() int { + return m.Size() +} +func (m *ListOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ListOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOptions proto.InternalMessageInfo + +func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } +func (*ManagedFieldsEntry) ProtoMessage() {} +func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{22} +} +func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedFieldsEntry.Merge(m, src) +} +func (m *ManagedFieldsEntry) XXX_Size() int { + return m.Size() +} +func (m *ManagedFieldsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo + +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{23} +} +func (m *MicroTime) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MicroTime.Unmarshal(m, b) +} +func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic) +} +func (m *MicroTime) XXX_Merge(src proto.Message) { + xxx_messageInfo_MicroTime.Merge(m, src) +} +func (m *MicroTime) XXX_Size() int { + return xxx_messageInfo_MicroTime.Size(m) +} +func (m *MicroTime) XXX_DiscardUnknown() { + xxx_messageInfo_MicroTime.DiscardUnknown(m) +} + +var xxx_messageInfo_MicroTime proto.InternalMessageInfo + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{24} +} +func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMeta.Merge(m, src) +} +func (m *ObjectMeta) XXX_Size() int { + return m.Size() +} +func (m *ObjectMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMeta.DiscardUnknown(m) } -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{25} +} +func (m *OwnerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OwnerReference) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OwnerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OwnerReference.Merge(m, src) +} +func (m *OwnerReference) XXX_Size() int { + return m.Size() +} +func (m *OwnerReference) XXX_DiscardUnknown() { + xxx_messageInfo_OwnerReference.DiscardUnknown(m) +} -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_OwnerReference proto.InternalMessageInfo -func (m *MicroTime) Reset() { *m = MicroTime{} } -func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{26} +} +func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadata.Merge(m, src) +} +func (m *PartialObjectMetadata) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m) +} -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{27} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} -func (m *Patch) Reset() { *m = Patch{} } -func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *Patch) Reset() { *m = Patch{} } +func 
(*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{28} +} +func (m *Patch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Patch) XXX_Merge(src proto.Message) { + xxx_messageInfo_Patch.Merge(m, src) +} +func (m *Patch) XXX_Size() int { + return m.Size() +} +func (m *Patch) XXX_DiscardUnknown() { + xxx_messageInfo_Patch.DiscardUnknown(m) +} -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_Patch proto.InternalMessageInfo + +func (m *PatchOptions) Reset() { *m = PatchOptions{} } +func (*PatchOptions) ProtoMessage() {} +func (*PatchOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{29} +} +func (m *PatchOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PatchOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PatchOptions.Merge(m, src) +} +func (m *PatchOptions) XXX_Size() int { + return m.Size() +} +func (m *PatchOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PatchOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_PatchOptions proto.InternalMessageInfo + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{30} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) +} + +var xxx_messageInfo_Preconditions proto.InternalMessageInfo + +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{31} +} +func (m *RootPaths) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RootPaths) XXX_Merge(src proto.Message) { + xxx_messageInfo_RootPaths.Merge(m, src) +} +func (m *RootPaths) XXX_Size() int { + return m.Size() +} +func (m *RootPaths) XXX_DiscardUnknown() { + xxx_messageInfo_RootPaths.DiscardUnknown(m) +} + +var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{29} + return 
fileDescriptor_cf52fa777ced5367, []int{32} +} +func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src) +} +func (m *ServerAddressByClientCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m) } -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{33} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} -func (m *StatusDetails) Reset() { *m = StatusDetails{} } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +var xxx_messageInfo_Status proto.InternalMessageInfo -func (m *Time) Reset() { *m = Time{} } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{34} +} +func (m *StatusCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusCause.Merge(m, src) +} +func (m *StatusCause) XXX_Size() int { + return m.Size() +} +func (m *StatusCause) XXX_DiscardUnknown() { + xxx_messageInfo_StatusCause.DiscardUnknown(m) +} -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +var xxx_messageInfo_StatusCause proto.InternalMessageInfo -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { + return 
fileDescriptor_cf52fa777ced5367, []int{35} +} +func (m *StatusDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusDetails.Merge(m, src) +} +func (m *StatusDetails) XXX_Size() int { + return m.Size() +} +func (m *StatusDetails) XXX_DiscardUnknown() { + xxx_messageInfo_StatusDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusDetails proto.InternalMessageInfo + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{36} +} +func (m *TableOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TableOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableOptions.Merge(m, src) +} +func (m *TableOptions) XXX_Size() int { + return m.Size() +} +func (m *TableOptions) XXX_DiscardUnknown() { + xxx_messageInfo_TableOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_TableOptions proto.InternalMessageInfo + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{37} +} +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} + +var xxx_messageInfo_Time proto.InternalMessageInfo + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{38} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{39} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) 
XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo + +func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } +func (*UpdateOptions) ProtoMessage() {} +func (*UpdateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{40} +} +func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UpdateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateOptions.Merge(m, src) +} +func (m *UpdateOptions) XXX_Size() int { + return m.Size() +} +func (m *UpdateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo + +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{41} +} +func (m *Verbs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Verbs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Verbs.Merge(m, src) +} +func (m *Verbs) XXX_Size() int { + return m.Size() +} +func (m *Verbs) XXX_DiscardUnknown() { + xxx_messageInfo_Verbs.DiscardUnknown(m) +} -func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } -func (*UpdateOptions) ProtoMessage() {} -func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +var xxx_messageInfo_Verbs proto.InternalMessageInfo -func (m *Verbs) Reset() { *m = Verbs{} } -func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{42} +} +func (m *WatchEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WatchEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchEvent.Merge(m, src) +} +func (m *WatchEvent) XXX_Size() int { + return m.Size() +} +func (m *WatchEvent) XXX_DiscardUnknown() { + xxx_messageInfo_WatchEvent.DiscardUnknown(m) +} -func (m *WatchEvent) Reset() { *m = WatchEvent{} } -func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +var xxx_messageInfo_WatchEvent proto.InternalMessageInfo func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -267,6 +1253,7 @@ func init() { proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") + proto.RegisterType((*FieldsV1)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") @@ -274,23 +1261,29 @@ func init() { proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") - proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") - proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry") proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata") + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList") proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") + proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions") proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") @@ -298,10 +1291,189 @@ func init() { proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") 
proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) +} + +var fileDescriptor_cf52fa777ced5367 = []byte{ + // 2713 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59, + 0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad, + 0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4, + 0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35, + 0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f, + 0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e, + 0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb, + 0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e, + 0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e, + 0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3, + 0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1, + 0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff, + 0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99, + 0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d, + 0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e, + 0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35, + 0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d, + 0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 0xbf, 0x92, + 0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b, + 0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71, + 0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb, + 0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e, + 0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b, + 0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30, + 0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 0x87, 0x49, 0x75, + 0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2, + 0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38, + 0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe, + 0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 0x0c, + 0x87, 0x13, 0x94, 0xe8, 0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6, + 0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad, + 0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac, + 0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 
0x0e, 0x6d, 0xd7, 0xe2, + 0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f, + 0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f, + 0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15, + 0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8, + 0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78, + 0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed, + 0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60, + 0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23, + 0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d, + 0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea, + 0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0, + 0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c, + 0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e, + 0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36, + 0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f, + 0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b, + 0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa, + 0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60, + 0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63, + 0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71, + 0xc7, 0x73, 0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47, + 0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d, + 0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c, + 0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21, + 0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01, + 0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32, + 0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde, + 0x0a, 0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70, + 0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93, + 0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e, + 0xa1, 0x50, 0xd9, 0x3f, 0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63, + 0x2a, 0x0d, 0x57, 0xa0, 0xa0, 0xcc, 0x94, 0x5a, 0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c, + 0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa, + 0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c, + 0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1, + 
0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9, + 0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60, + 0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee, + 0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd, + 0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4, + 0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b, + 0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9, + 0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0, + 0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b, + 0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62, + 0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7, + 0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19, + 0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e, + 0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a, + 0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51, + 0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1, + 0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68, + 0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25, + 0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e, + 0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40, + 0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3, + 0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd, + 0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c, + 0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73, + 0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7, + 0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9, + 0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74, + 0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24, + 0x5a, 0x62, 0x4b, 0xf4, 0x36, 0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6, + 0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e, + 0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8, + 0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8, + 0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 0x2f, 0xa5, 0x4b, 0x1b, 0xec, 0xac, 0xeb, 0x05, 0x56, + 0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed, + 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13, + 0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49, + 0x3d, 0x66, 0x20, 0xbb, 
0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c, + 0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02, + 0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f, + 0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe, + 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45, + 0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d, + 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e, + 0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3, + 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea, + 0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1, + 0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e, + 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8, + 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93, + 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47, + 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c, + 0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f, + 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42, + 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c, + 0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1, + 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38, + 0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42, + 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde, + 0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d, + 0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6, + 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95, + 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0, + 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75, + 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00, + 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43, + 0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4, + 0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72, + 0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0xb5, 0x06, + 0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 0x38, 0x2a, 0x88, 0x3f, 0x13, + 0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44, + 0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77, + 0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d, + 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 
0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66, + 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd, + 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff, + 0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62, + 0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd, + 0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4, + 0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b, + 0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c, + 0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac, + 0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37, + 0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6, + 0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0, + 0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea, + 0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4, + 0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74, + 0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b, + 0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86, + 0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79, + 0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e, + 0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6, + 0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b, + 0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 0x44, 0xe5, + 0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11, + 0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef, + 0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5, + 0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d, + 0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d, + 0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00, +} + func (m *APIGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -309,53 +1481,65 @@ func (m *APIGroup) Marshal() (dAtA []byte, err error) { } func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err 
:= msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.PreferredVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x1a + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIGroupList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -363,29 +1547,36 @@ func (m *APIGroupList) Marshal() (dAtA []byte, err error) { } func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Groups) > 0 { - for _, msg := range m.Groups { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *APIResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -393,85 +1584,90 @@ func (m *APIResource) Marshal() (dAtA []byte, err error) { } func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Namespaced { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - if m.Verbs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) - n2, err := 
m.Verbs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.StorageVersionHash) + copy(dAtA[i:], m.StorageVersionHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) + i-- + dAtA[i] = 0x52 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x4a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x42 + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n2 } + i -= len(m.SingularName) + copy(dAtA[i:], m.SingularName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i-- + dAtA[i] = 0x32 if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) - i += copy(dAtA[i:], m.SingularName) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Verbs != nil { + { + size, err := m.Verbs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i-- + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIResourceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -479,33 +1675,41 @@ func (m *APIResourceList) Marshal() (dAtA []byte, err error) { } func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) if len(m.APIResources) > 0 { - for _, msg := range m.APIResources { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.APIResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.APIResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIVersions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -513,44 +1717,45 @@ func (m *APIVersions) Marshal() (dAtA []byte, err error) { } func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 } } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Versions[iNdEx]) + copy(dAtA[i:], m.Versions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Versions[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *CreateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -558,40 +1763,36 @@ func (m *CreateOptions) Marshal() (dAtA []byte, err error) { } func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x10 - i++ - if m.IncludeUninitialized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil + return len(dAtA) - i, nil } func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -599,63 +1800,65 @@ 
func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { } func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GracePeriodSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) - n3, err := m.Preconditions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0x2a } - i += n3 + } + if m.PropagationPolicy != nil { + i -= len(*m.PropagationPolicy) + copy(dAtA[i:], *m.PropagationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i-- + dAtA[i] = 0x22 } if m.OrphanDependents != nil { - dAtA[i] = 0x18 - i++ + i-- if *m.OrphanDependents { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.PropagationPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) - i += copy(dAtA[i:], *m.PropagationPolicy) + i-- + dAtA[i] = 0x18 } - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Preconditions != nil { + { + size, err := m.Preconditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.GracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *Duration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -663,20 +1866,25 @@ func (m *Duration) Marshal() (dAtA []byte, err error) { } func (m *Duration) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ExportOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -684,33 +1892,68 @@ func (m *ExportOptions) Marshal() (dAtA []byte, err error) { } func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Export { + i-- + if m.Exact { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- dAtA[i] = 0x10 - i++ - if m.Exact { + i-- + if m.Export { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - 
i, nil +} + +func (m *FieldsV1) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldsV1) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldsV1) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -718,725 +1961,987 @@ func (m *GetOptions) Marshal() (dAtA []byte, err error) { } func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + 
copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionForDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x10 - i++ - if m.IncludeUninitialized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil + return len(dAtA) - i, nil } -func (m *GroupKind) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupResource) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupVersion) Marshal() (dAtA []byte, err error) { +func (m *LabelSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.MatchLabels) > 0 { + keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) + for k := range m.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.MatchLabels[string(keysForMatchLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMatchLabels[iNdEx]) + copy(dAtA[i:], keysForMatchLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { +func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupVersionKind) 
Marshal() (dAtA []byte, err error) { +func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *List) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + return len(dAtA) - i, nil } -func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { +func (m *ListMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) + if m.RemainingItemCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Initializer) Marshal() (dAtA []byte, err error) { +func (m *ListOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) _ = i var l int _ = l + i-- + if m.AllowWatchBookmarks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x38 + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x28 + } + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i-- + if m.Watch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.FieldSelector) + copy(dAtA[i:], m.FieldSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i-- + dAtA[i] = 0x12 + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + return len(dAtA) - i, nil } -func (m *Initializers) Marshal() (dAtA []byte, err error) { +func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Pending) > 0 { - for _, msg := range m.Pending { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.FieldsV1 != nil { + { + size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x3a } - if m.Result != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n4, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.FieldsType) + copy(dAtA[i:], m.FieldsType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldsType))) + i-- + dAtA[i] = 0x32 + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x22 } - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x12 + i -= len(m.Manager) + copy(dAtA[i:], m.Manager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LabelSelector) Marshal() (dAtA []byte, err error) { +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { - var 
i int +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchLabels) > 0 { - keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) - for k := range m.MatchLabels { - keysForMatchLabels = append(keysForMatchLabels, string(k)) + if len(m.ManagedFields) > 0 { + for iNdEx := len(m.ManagedFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ManagedFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) - for _, k := range keysForMatchLabels { - dAtA[i] = 0xa - i++ - v := m.MatchLabels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + } + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0x7a + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0x72 + } + } + if len(m.OwnerReferences) > 0 { + for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 } } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 
0x5a + } + } + if m.DeletionGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + i-- + dAtA[i] = 0x50 + } + if m.DeletionTimestamp != nil { + { + size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x4a } - return i, nil -} - -func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.CreationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x38 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.GenerateName) + copy(dAtA[i:], m.GenerateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *List) Marshal() (dAtA []byte, err error) { +func (m *OwnerReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.BlockOwnerDeletion != nil { + i-- + if *m.BlockOwnerDeletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.Controller != nil { + i-- + if *m.Controller { + dAtA[i] = 1 
+ } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x30 } - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ListMeta) Marshal() (dAtA []byte, err error) { +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - return i, nil + return len(dAtA) - i, nil } -func (m *ListOptions) Marshal() (dAtA []byte, err error) { +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) - i += copy(dAtA[i:], m.LabelSelector) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) - i += copy(dAtA[i:], m.FieldSelector) - dAtA[i] = 0x18 - i++ - if m.Watch { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - if m.TimeoutSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - dAtA[i] = 0x30 - i++ - if m.IncludeUninitialized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } 
+ i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { +func (m *Patch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Patch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) - i += copy(dAtA[i:], m.GenerateName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if m.DeletionTimestamp != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.DeletionGracePeriodSeconds != nil { - dAtA[i] = 0x50 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) - } - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for _, k := range keysForLabels { - dAtA[i] = 0x5a - i++ - v := m.Labels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.Annotations) > 0 { - keysForAnnotations := make([]string, 0, len(m.Annotations)) - for k := range m.Annotations { - keysForAnnotations = append(keysForAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for _, k := range keysForAnnotations { - dAtA[i] = 0x62 - i++ - v := m.Annotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i 
+= copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.OwnerReferences) > 0 { - for _, msg := range m.OwnerReferences { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0x72 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) - i += copy(dAtA[i:], m.ClusterName) - if m.Initializers != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n8, err := m.Initializers.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil + return len(dAtA) - i, nil } -func (m *OwnerReference) Marshal() (dAtA []byte, err error) { +func (m *PatchOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Controller != nil { - dAtA[i] = 0x30 - i++ - if *m.Controller { + if m.Force != nil { + i-- + if *m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x10 } - if m.BlockOwnerDeletion != nil { - dAtA[i] = 0x38 - i++ - if *m.BlockOwnerDeletion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0xa } - i++ } - return i, nil + return len(dAtA) - i, nil } -func (m *Patch) Marshal() (dAtA []byte, err error) { +func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Patch) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *Preconditions) Marshal() (dAtA []byte, err error) { +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func 
(m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.ResourceVersion != nil { + i -= len(*m.ResourceVersion) + copy(dAtA[i:], *m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + } if m.UID != nil { - dAtA[i] = 0xa - i++ + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RootPaths) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1444,32 +2949,31 @@ func (m *RootPaths) Marshal() (dAtA []byte, err error) { } func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RootPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, s := range m.Paths { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1477,25 +2981,32 @@ func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { } func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerAddressByClientCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) - i += copy(dAtA[i:], m.ClientCIDR) - dAtA[i] = 0x12 - i++ + i -= len(m.ServerAddress) + copy(dAtA[i:], m.ServerAddress) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) - i += copy(dAtA[i:], m.ServerAddress) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ClientCIDR) + copy(dAtA[i:], m.ClientCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1503,50 +3014,62 @@ func (m *Status) Marshal() (dAtA []byte, err error) { } func (m *Status) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += 
copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x30 if m.Details != nil { + { + size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n10, err := m.Details.MarshalTo(dAtA[i:]) + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n10 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusCause) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1554,29 +3077,37 @@ func (m *StatusCause) Marshal() (dAtA []byte, err error) { } func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ + i -= len(m.Field) + copy(dAtA[i:], m.Field) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) - i += copy(dAtA[i:], m.Field) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1584,48 +3115,87 @@ func (m *StatusDetails) Marshal() (dAtA []byte, err error) { } func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) + i -= len(m.UID) + 
copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + i-- + dAtA[i] = 0x28 if len(m.Causes) > 0 { - for _, msg := range m.Causes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TableOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IncludeObject) + copy(dAtA[i:], m.IncludeObject) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Timestamp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1633,23 +3203,28 @@ func (m *Timestamp) Marshal() (dAtA []byte, err error) { } func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1657,25 +3232,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1683,32 +3265,36 @@ func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { } func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x12 if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m Verbs) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1716,32 +3302,31 @@ func (m Verbs) Marshal() (dAtA []byte, err error) { } func (m Verbs) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m Verbs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *WatchEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1749,53 +3334,48 @@ func (m *WatchEvent) Marshal() (dAtA []byte, err error) { } func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n11, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func 
encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *APIGroup) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1818,6 +3398,9 @@ func (m *APIGroup) Size() (n int) { } func (m *APIGroupList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Groups) > 0 { @@ -1830,6 +3413,9 @@ func (m *APIGroupList) Size() (n int) { } func (m *APIResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1859,10 +3445,15 @@ func (m *APIResource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Version) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageVersionHash) + n += 1 + l + sovGenerated(uint64(l)) return n } func (m *APIResourceList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -1877,6 +3468,9 @@ func (m *APIResourceList) Size() (n int) { } func (m *APIVersions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Versions) > 0 { @@ -1895,6 +3489,9 @@ func (m *APIVersions) Size() (n int) { } func (m *CreateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -1903,11 +3500,15 @@ func (m *CreateOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - n += 2 + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) return n } func (m *DeleteOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.GracePeriodSeconds != nil { @@ -1934,6 +3535,9 @@ func (m *DeleteOptions) Size() (n int) { } func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Duration)) @@ -1941,6 +3545,9 @@ func (m *Duration) Size() (n int) { } func (m *ExportOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1948,16 +3555,34 @@ func (m *ExportOptions) Size() (n int) { return n } +func (m *FieldsV1) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *GetOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) - n += 2 return n } func (m *GroupKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -1968,6 +3593,9 @@ func (m *GroupKind) Size() (n int) { } func (m *GroupResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -1978,6 +3606,9 @@ func (m *GroupResource) Size() (n int) { } func (m *GroupVersion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -1988,6 +3619,9 @@ func (m *GroupVersion) Size() (n int) { } func (m 
*GroupVersionForDiscovery) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -1998,6 +3632,9 @@ func (m *GroupVersionForDiscovery) Size() (n int) { } func (m *GroupVersionKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2010,6 +3647,9 @@ func (m *GroupVersionKind) Size() (n int) { } func (m *GroupVersionResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2021,31 +3661,10 @@ func (m *GroupVersionResource) Size() (n int) { return n } -func (m *Initializer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Initializers) Size() (n int) { - var l int - _ = l - if len(m.Pending) > 0 { - for _, e := range m.Pending { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *LabelSelector) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.MatchLabels) > 0 { @@ -2066,6 +3685,9 @@ func (m *LabelSelector) Size() (n int) { } func (m *LabelSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Key) @@ -2082,6 +3704,9 @@ func (m *LabelSelectorRequirement) Size() (n int) { } func (m *List) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2096,6 +3721,9 @@ func (m *List) Size() (n int) { } func (m *ListMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.SelfLink) @@ -2104,10 +3732,16 @@ func (m *ListMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) + if m.RemainingItemCount != nil { + n += 1 + sovGenerated(uint64(*m.RemainingItemCount)) + } return n } func (m *ListOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.LabelSelector) @@ -2120,14 +3754,42 @@ func (m *ListOptions) Size() (n int) { if m.TimeoutSeconds != nil { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } - n += 2 n += 1 + sovGenerated(uint64(m.Limit)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ManagedFieldsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Manager) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.Time != nil { + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.FieldsType) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldsV1 != nil { + l = m.FieldsV1.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *ObjectMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2182,14 +3844,19 @@ func (m *ObjectMeta) Size() (n int) { } l = len(m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) - if m.Initializers != nil { - l = m.Initializers.Size() - n += 2 + l + sovGenerated(uint64(l)) + if len(m.ManagedFields) > 0 { + for _, e := range m.ManagedFields { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } return n } func (m *OwnerReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2209,23 +3876,84 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *PartialObjectMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Patch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PatchOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Force != nil { + n += 2 + } + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) return n } func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.UID != nil { l = len(*m.UID) n += 1 + l + sovGenerated(uint64(l)) } + if m.ResourceVersion != nil { + l = len(*m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *RootPaths) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -2238,6 +3966,9 @@ func (m *RootPaths) Size() (n int) { } func (m *ServerAddressByClientCIDR) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ClientCIDR) @@ -2248,6 +3979,9 @@ func (m *ServerAddressByClientCIDR) Size() (n int) { } func (m *Status) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2267,6 +4001,9 @@ func (m *Status) Size() (n int) { } func (m *StatusCause) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2279,6 +4016,9 @@ func (m *StatusCause) Size() (n int) { } func (m *StatusDetails) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2299,7 +4039,21 @@ func (m *StatusDetails) Size() (n int) { return n } +func (m *TableOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IncludeObject) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Seconds)) @@ -2308,6 +4062,9 @@ func (m *Timestamp) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2318,6 +4075,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *UpdateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2326,10 +4086,15 @@ func (m *UpdateOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) return n } func (m Verbs) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -2342,6 +4107,9 @@ func (m Verbs) Size() (n int) { } func (m *WatchEvent) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2350,16 +4118,9 @@ func (m *WatchEvent) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) return n } - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2368,11 +4129,21 @@ func (this *APIGroup) String() string { if this == nil { return "nil" } + repeatedStringForVersions := 
"[]GroupVersionForDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForServerAddressByClientCIDRs := "[]ServerAddressByClientCIDR{" + for _, f := range this.ServerAddressByClientCIDRs { + repeatedStringForServerAddressByClientCIDRs += strings.Replace(strings.Replace(f.String(), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForServerAddressByClientCIDRs += "}" s := strings.Join([]string{`&APIGroup{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + repeatedStringForServerAddressByClientCIDRs + `,`, `}`, }, "") return s @@ -2381,8 +4152,13 @@ func (this *APIGroupList) String() string { if this == nil { return "nil" } + repeatedStringForGroups := "[]APIGroup{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(strings.Replace(f.String(), "APIGroup", "APIGroup", 1), `&`, ``, 1) + "," + } + repeatedStringForGroups += "}" s := strings.Join([]string{`&APIGroupList{`, - `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `Groups:` + repeatedStringForGroups + `,`, `}`, }, "") return s @@ -2401,6 +4177,7 @@ func (this *APIResource) String() string { `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `StorageVersionHash:` + fmt.Sprintf("%v", this.StorageVersionHash) + `,`, `}`, }, "") return s @@ -2409,9 +4186,14 @@ func (this *APIResourceList) String() string { if this == nil { return "nil" } + repeatedStringForAPIResources := "[]APIResource{" + for _, f := range this.APIResources { + repeatedStringForAPIResources += strings.Replace(strings.Replace(f.String(), "APIResource", "APIResource", 1), `&`, ``, 1) + "," + } + repeatedStringForAPIResources += "}" s := strings.Join([]string{`&APIResourceList{`, `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `APIResources:` + repeatedStringForAPIResources + `,`, `}`, }, "") return s @@ -2422,7 +4204,7 @@ func (this *CreateOptions) String() string { } s := strings.Join([]string{`&CreateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, + `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, `}`, }, "") return s @@ -2433,7 +4215,7 @@ func (this *DeleteOptions) String() string { } s := strings.Join([]string{`&DeleteOptions{`, `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, - `Preconditions:` + strings.Replace(fmt.Sprintf("%v", 
this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `Preconditions:` + strings.Replace(this.Preconditions.String(), "Preconditions", "Preconditions", 1) + `,`, `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, @@ -2462,45 +4244,33 @@ func (this *ExportOptions) String() string { }, "") return s } -func (this *GetOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetOptions{`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, - `}`, - }, "") - return s -} -func (this *GroupVersionForDiscovery) String() string { +func (this *FieldsV1) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&GroupVersionForDiscovery{`, - `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + s := strings.Join([]string{`&FieldsV1{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, `}`, }, "") return s } -func (this *Initializer) String() string { +func (this *GetOptions) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + s := strings.Join([]string{`&GetOptions{`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `}`, }, "") return s } -func (this *Initializers) String() string { +func (this *GroupVersionForDiscovery) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Initializers{`, - `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, + s := strings.Join([]string{`&GroupVersionForDiscovery{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `}`, }, "") return s @@ -2509,6 +4279,11 @@ func (this *LabelSelector) String() string { if this == nil { return "nil" } + repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) for k := range this.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, k) @@ -2521,7 +4296,7 @@ func (this *LabelSelector) String() string { mapStringForMatchLabels += "}" s := strings.Join([]string{`&LabelSelector{`, `MatchLabels:` + mapStringForMatchLabels + `,`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, `}`, }, "") return s @@ -2542,9 +4317,14 @@ func (this *List) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&List{`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2557,6 +4337,7 @@ func (this *ListMeta) String() string { `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`, `}`, }, "") return s @@ -2571,9 +4352,24 @@ func (this *ListOptions) String() string { `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, + `}`, + }, "") + return s +} +func (this *ManagedFieldsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagedFieldsEntry{`, + `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, + `FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`, + `FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`, `}`, }, "") return s @@ -2582,6 +4378,16 @@ func (this *ObjectMeta) String() string { if this == nil { return "nil" } + repeatedStringForOwnerReferences := "[]OwnerReference{" + for _, f := range this.OwnerReferences { + repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForOwnerReferences += "}" + repeatedStringForManagedFields := "[]ManagedFieldsEntry{" + for _, f := range this.ManagedFields { + repeatedStringForManagedFields += strings.Replace(strings.Replace(f.String(), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForManagedFields += "}" keysForLabels := make([]string, 0, len(this.Labels)) for k := range this.Labels { keysForLabels = append(keysForLabels, k) @@ -2610,15 +4416,15 @@ func (this *ObjectMeta) String() string { `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreationTimestamp), "Time", "Time", 1), `&`, ``, 1) + `,`, `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, `Finalizers:` + 
fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, - `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, + `ManagedFields:` + repeatedStringForManagedFields + `,`, `}`, }, "") return s @@ -2638,6 +4444,32 @@ func (this *OwnerReference) String() string { }, "") return s } +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *Patch) String() string { if this == nil { return "nil" @@ -2647,12 +4479,25 @@ func (this *Patch) String() string { }, "") return s } +func (this *PatchOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PatchOptions{`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `Force:` + valueToStringGenerated(this.Force) + `,`, + `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `}`, + }, "") + return s +} func (this *Preconditions) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Preconditions{`, `UID:` + valueToStringGenerated(this.UID) + `,`, + `ResourceVersion:` + valueToStringGenerated(this.ResourceVersion) + `,`, `}`, }, "") return s @@ -2687,7 +4532,7 @@ func (this *Status) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Details:` + strings.Replace(this.Details.String(), "StatusDetails", "StatusDetails", 1) + `,`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `}`, }, "") @@ -2709,17 +4554,32 @@ func (this *StatusDetails) String() string { if this == nil { return "nil" } + repeatedStringForCauses := "[]StatusCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "StatusCause", "StatusCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" s := strings.Join([]string{`&StatusDetails{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `Causes:` + repeatedStringForCauses + `,`, `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, }, "") return s } +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", 
this.IncludeObject) + `,`, + `}`, + }, "") + return s +} func (this *Timestamp) String() string { if this == nil { return "nil" @@ -2748,6 +4608,7 @@ func (this *UpdateOptions) String() string { } s := strings.Join([]string{`&UpdateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, `}`, }, "") return s @@ -2758,7 +4619,7 @@ func (this *WatchEvent) String() string { } s := strings.Join([]string{`&WatchEvent{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2786,7 +4647,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2814,7 +4675,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2824,6 +4685,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2843,7 +4707,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2852,6 +4716,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2874,7 +4741,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2883,6 +4750,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2904,7 +4774,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2913,6 +4783,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2930,6 +4803,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2957,7 +4833,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2985,7 +4861,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2994,6 +4870,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex 
:= iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3011,6 +4890,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3038,7 +4920,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3066,7 +4948,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3076,6 +4958,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3095,7 +4980,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3115,7 +5000,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3125,6 +5010,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3144,7 +5032,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3153,6 +5041,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3177,7 +5068,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3187,6 +5078,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3206,7 +5100,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3216,6 +5110,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3235,7 +5132,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3245,6 +5142,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3264,7 +5164,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3274,6 +5174,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3293,7 +5196,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3303,11 +5206,46 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageVersionHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageVersionHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3317,6 +5255,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3344,7 +5285,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3372,7 +5313,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3382,6 +5323,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3401,7 +5345,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3410,6 +5354,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3427,6 +5374,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3454,7 +5404,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3482,7 +5432,7 @@ func (m *APIVersions) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3492,6 +5442,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3511,7 +5464,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3520,6 +5473,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3537,6 +5493,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3564,7 +5523,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3592,7 +5551,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3602,16 +5561,19 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3621,12 +5583,24 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeUninitialized = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3636,6 +5610,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3663,7 +5640,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3691,7 +5668,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3711,7 +5688,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3720,6 +5697,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3744,7 +5724,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3765,7 +5745,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3775,6 +5755,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3795,7 +5778,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3805,6 +5788,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3819,6 +5805,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3846,7 +5835,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3874,7 +5863,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Duration |= (time.Duration(b) & 0x7F) << shift + m.Duration |= time.Duration(b&0x7F) << shift if b < 0x80 { break } @@ -3888,6 +5877,9 @@ func (m *Duration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3915,7 +5907,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3943,7 +5935,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3963,7 +5955,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3978,6 +5970,96 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FieldsV1) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldsV1: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldsV1: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4005,7 +6087,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4033,7 +6115,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4043,31 +6125,14 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IncludeUninitialized = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4077,6 +6142,9 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4104,7 +6172,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4132,7 +6200,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4142,6 +6210,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4161,7 +6232,7 @@ func (m *GroupKind) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4171,6 +6242,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4185,6 +6259,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4212,7 +6289,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4240,7 +6317,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4250,6 +6327,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4269,7 +6349,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4279,6 +6359,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4293,6 +6376,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4320,7 +6406,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4348,7 +6434,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4358,6 +6444,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4377,7 +6466,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4387,6 +6476,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4401,6 +6493,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4428,7 +6523,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4456,7 +6551,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4466,6 +6561,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4485,7 +6583,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4495,6 +6593,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4509,6 +6610,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4536,7 +6640,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4564,7 +6668,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4574,6 +6678,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4593,7 +6700,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4603,6 +6710,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4622,7 +6732,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4632,6 +6742,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4646,6 +6759,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4673,7 +6789,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4701,7 +6817,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } 
@@ -4711,6 +6827,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4730,7 +6849,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4740,6 +6859,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4759,7 +6881,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4769,6 +6891,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4783,6 +6908,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4795,7 +6923,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Initializer) Unmarshal(dAtA []byte) error { +func (m *LabelSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4810,7 +6938,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4818,17 +6946,17 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4838,20 +6966,152 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -4862,6 +7122,9 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4874,7 +7137,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } return nil } -func (m *Initializers) Unmarshal(dAtA []byte) error { +func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4889,7 +7152,7 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4897,17 +7160,17 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { fieldNum 
:= int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Initializers: wiretype end group for non-group") + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4917,28 +7180,29 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Pending = append(m.Pending, Initializer{}) - if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4948,24 +7212,55 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &Status{} + m.Operator = LabelSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -4976,6 +7271,9 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4988,7 +7286,7 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } return nil } -func (m 
*LabelSelector) Unmarshal(dAtA []byte) error { +func (m *List) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5003,7 +7301,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5011,54 +7309,17 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + return fmt.Errorf("proto: List: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLenmapkey uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5068,74 +7329,28 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.MatchLabels == nil { - m.MatchLabels = make(map[string]string) + if postIndex > l { + return io.ErrUnexpectedEOF } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - 
m.MatchLabels[mapkey] = mapvalue - } else { - var mapvalue string - m.MatchLabels[mapkey] = mapvalue + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5147,7 +7362,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5156,11 +7371,14 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5173,6 +7391,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5185,7 +7406,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { +func (m *ListMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5200,7 +7421,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5208,15 +7429,15 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5228,7 +7449,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5238,14 +7459,17 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.SelfLink = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 
for shift := uint(0); ; shift += 7 { @@ -5257,7 +7481,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5267,14 +7491,17 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Operator = LabelSelectorOperator(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5286,7 +7513,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5296,11 +7523,34 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RemainingItemCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5310,6 +7560,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5322,7 +7575,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } return nil } -func (m *List) Unmarshal(dAtA []byte) error { +func (m *ListOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5337,7 +7590,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5345,17 +7598,17 @@ func (m *List) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: List: wiretype end group for non-group") + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5365,27 +7618,29 @@ func (m *List) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.LabelSelector = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5395,76 +7650,47 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.FieldSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Watch = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5476,7 +7702,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5486,14 +7712,56 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SelfLink = string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5505,7 +7773,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5515,16 +7783,19 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5534,21 +7805,12 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Continue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.AllowWatchBookmarks = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5558,6 +7820,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5570,7 +7835,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } return nil } -func (m *ListOptions) Unmarshal(dAtA []byte) error { +func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5585,7 +7850,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5593,15 +7858,15 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { 
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ManagedFieldsEntry: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ManagedFieldsEntry: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5613,7 +7878,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5623,14 +7888,17 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.LabelSelector = string(dAtA[iNdEx:postIndex]) + m.Manager = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5642,7 +7910,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5652,34 +7920,17 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldSelector = string(dAtA[iNdEx:postIndex]) + m.Operation = ManagedFieldsOperationType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Watch = bool(v != 0) - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5691,7 +7942,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5701,16 +7952,19 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Time", wireType) } - var v int64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5720,17 +7974,33 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Time == nil { + m.Time = &Time{} + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldsType", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5740,36 +8010,29 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeUninitialized = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 8: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldsType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldsV1", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5779,20 +8042,27 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Continue = string(dAtA[iNdEx:postIndex]) + if m.FieldsV1 == nil { + m.FieldsV1 = &FieldsV1{} + } + if err := m.FieldsV1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5803,6 +8073,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5830,7 +8103,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5858,7 +8131,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5868,6 +8141,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5887,7 +8163,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5897,6 +8173,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5916,7 +8195,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5926,6 +8205,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5945,7 +8227,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5955,6 +8237,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5974,7 +8259,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5984,6 +8269,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6003,7 +8291,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6013,6 +8301,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6032,7 +8323,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift + m.Generation |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6051,7 +8342,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6060,6 +8351,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6081,7 +8375,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6090,6 +8384,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6114,7 +8411,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6134,7 +8431,261 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + 
m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6143,10 +8694,22 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6156,12 +8719,29 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6171,74 +8751,27 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Labels == nil { - m.Labels = make(map[string]string) + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue + if postIndex > l { + return io.ErrUnexpectedEOF } + m.ClusterName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 12: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6250,7 +8783,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6259,10 +8792,75 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{}) + if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l 
{ + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6272,12 +8870,29 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6287,76 +8902,29 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Annotations == nil { - m.Annotations = make(map[string]string) + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - 
m.Annotations[mapkey] = mapvalue + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6366,26 +8934,27 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6397,7 +8966,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6407,16 +8976,19 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6426,24 +8998,90 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.ClusterName = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex - case 16: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6455,7 +9093,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6464,13 +9102,13 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Initializers == nil { - m.Initializers = &Initializers{} - } - if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6483,6 +9121,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6495,7 +9136,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } return nil } -func (m *OwnerReference) Unmarshal(dAtA []byte) error { +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6510,7 +9151,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6518,17 +9159,17 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6538,26 +9179,30 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := 
int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6567,53 +9212,135 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PatchOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PatchOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PatchOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6625,7 +9352,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6635,14 +9362,17 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(dAtA[iNdEx:postIndex]) + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 6: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -6654,84 +9384,45 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } b := bool(v != 0) - m.Controller = &b - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + m.Force = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.BlockOwnerDeletion = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Patch) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType 
== 4 { - return fmt.Errorf("proto: Patch: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6741,6 +9432,9 @@ func (m *Patch) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6768,7 +9462,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6796,7 +9490,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6806,12 +9500,48 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) m.UID = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6821,6 +9551,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6848,7 +9581,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6876,7 +9609,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6886,6 +9619,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6900,6 +9636,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6927,7 +9666,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6955,7 
+9694,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6965,6 +9704,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6984,7 +9726,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6994,6 +9736,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7008,6 +9753,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7035,7 +9783,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7063,7 +9811,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7072,6 +9820,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7093,7 +9844,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7103,6 +9854,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7122,7 +9876,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7132,6 +9886,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7151,7 +9908,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7161,6 +9918,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7180,7 +9940,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7189,6 +9949,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7213,7 +9976,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= (int32(b) & 0x7F) << shift + m.Code |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7227,6 +9990,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7254,7 +10020,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7282,7 +10048,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7292,6 +10058,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7311,7 +10080,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7321,6 +10090,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7340,7 +10112,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7350,6 +10122,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7364,6 +10139,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7391,7 +10169,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7419,7 +10197,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7429,6 +10207,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7448,7 +10229,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7458,6 +10239,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7477,7 +10261,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7487,6 +10271,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7506,7 +10293,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7515,6 +10302,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7537,7 +10327,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + m.RetryAfterSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7556,7 +10346,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7566,6 +10356,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7580,6 +10373,94 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } 
@@ -7607,7 +10488,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7635,7 +10516,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Seconds |= (int64(b) & 0x7F) << shift + m.Seconds |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7654,7 +10535,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Nanos |= (int32(b) & 0x7F) << shift + m.Nanos |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7668,6 +10549,9 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7695,7 +10579,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7723,7 +10607,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7733,6 +10617,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7752,7 +10639,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7762,6 +10649,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7776,6 +10666,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7803,7 +10696,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7831,7 +10724,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7841,11 +10734,46 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7855,6 +10783,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7882,7 +10813,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7910,7 +10841,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7920,6 +10851,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7934,6 +10868,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7961,7 +10898,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7989,7 +10926,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7999,6 +10936,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8018,7 +10958,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8027,6 +10967,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8043,6 +10986,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8109,10 +11055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -8141,6 +11090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -8159,166 +11111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2465 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 
0xdc, - 0x60, 0x6b, 0x1c, 0x08, 0x4f, 0x62, 0xa3, 0xef, 0xc3, 0x7a, 0x48, 0x9c, 0x7c, 0x9e, 0x56, 0x40, - 0xec, 0x54, 0x95, 0x3f, 0xdc, 0x5b, 0x53, 0xa5, 0xf0, 0x73, 0x10, 0x90, 0x05, 0x79, 0x27, 0xe8, - 0x2e, 0x4a, 0xa2, 0x22, 0x7c, 0x23, 0xdd, 0x2a, 0xa2, 0xe8, 0xaf, 0xc7, 0xbb, 0x0a, 0xf5, 0xfc, - 0x92, 0x0d, 0x85, 0xc4, 0x46, 0x0f, 0xa1, 0x64, 0xb8, 0xae, 0xe7, 0x1b, 0xc1, 0x83, 0x79, 0x51, - 0x98, 0xda, 0x99, 0xd9, 0xd4, 0x4e, 0x84, 0x31, 0xd6, 0xc5, 0xc4, 0x38, 0x38, 0x6e, 0x0a, 0x3d, - 0x80, 0x65, 0xef, 0x81, 0x4b, 0x28, 0x26, 0xc7, 0x84, 0x12, 0xd7, 0x24, 0xac, 0x52, 0x16, 0xd6, - 0xbf, 0x92, 0xd2, 0x7a, 0x42, 0x39, 0x0a, 0xe9, 0x24, 0x9d, 0xe1, 0x71, 0x2b, 0xa8, 0x0e, 0x70, - 0x6c, 0xbb, 0xb2, 0x17, 0xad, 0x2c, 0x45, 0x63, 0xb2, 0xeb, 0x8a, 0x8a, 0x63, 0x12, 0xe8, 0xab, - 0x50, 0x32, 0x9d, 0x3e, 0xf3, 0x49, 0x30, 0x8f, 0x5b, 0x16, 0x37, 0x48, 0xad, 0x6f, 0x37, 0x62, - 0xe1, 0xb8, 0x1c, 0x3a, 0x81, 0x45, 0x3b, 0xd6, 0xf4, 0x56, 0x56, 0x44, 0x2c, 0x6e, 0xcf, 0xdc, - 0xe9, 0xb2, 0xe6, 0x0a, 0xcf, 0x44, 0x71, 0x0a, 0x4e, 0x20, 0xaf, 0x7f, 0x0d, 0x4a, 0x9f, 0xb0, - 0x07, 0xe3, 0x3d, 0xdc, 0xf8, 0xd1, 0xcd, 0xd4, 0xc3, 0xfd, 0x25, 0x03, 0x4b, 0xc9, 0x0d, 0x57, - 0x6f, 0x1d, 0x6d, 0xea, 0x7c, 0x35, 0xcc, 0xca, 0xd9, 0xa9, 0x59, 0x59, 0x26, 0xbf, 0xf9, 0x17, - 0x49, 0x7e, 0xdb, 0x00, 0x46, 0xcf, 0x0e, 0xf3, 0x5e, 0x90, 0x47, 0x55, 0xe6, 0x8a, 0x26, 0x7e, - 0x38, 0x26, 0x25, 0x26, 0xa8, 0x9e, 0xeb, 0x53, 0xcf, 0x71, 0x08, 0x95, 0xc5, 0x34, 0x98, 0xa0, - 0x2a, 0x2a, 0x8e, 0x49, 0xa0, 0xeb, 0x80, 0x8e, 0x1c, 0xcf, 0x3c, 0x15, 0x5b, 0x10, 0xde, 0x73, - 0x91, 0x25, 0x0b, 0xc1, 0xe0, 0xaa, 0x39, 0xc1, 0xc5, 0x17, 0x68, 0xe8, 0x0b, 0x90, 0x6b, 0xf3, - 0xb6, 0x42, 0xbf, 0x0d, 0xc9, 0x99, 0x13, 0x7a, 0x27, 0xd8, 0x09, 0x4d, 0x0d, 0x85, 0x66, 0xdb, - 0x05, 0xfd, 0x0a, 0x14, 0xb1, 0xe7, 0xf9, 0x6d, 0xc3, 0x3f, 0x61, 0xa8, 0x06, 0xb9, 0x1e, 0xff, - 0x47, 0x8e, 0xfb, 0xc4, 0xac, 0x5a, 0x70, 0x70, 0x40, 0xd7, 0x7f, 0xa9, 0xc1, 0x2b, 0x53, 0xe7, - 0x8c, 0x7c, 0x47, 0x4d, 0xf5, 0x25, 0x5d, 0x52, 0x3b, 0x1a, 0xc9, 0xe1, 0x98, 0x14, 0xef, 0xc4, - 0x12, 0xc3, 0xc9, 0xf1, 0x4e, 0x2c, 0x61, 0x0d, 0x27, 0x65, 0xf5, 0x7f, 0x67, 0x20, 0x1f, 0x3c, - 0xcb, 0x3e, 0xe3, 0xe6, 0xfb, 0x75, 0xc8, 0x33, 0x61, 0x47, 0xba, 0xa7, 0xb2, 0x65, 0x60, 0x1d, - 0x4b, 0x2e, 0x6f, 0x62, 0xba, 0x84, 0x31, 0xa3, 0x13, 0x06, 0xaf, 0x6a, 0x62, 0x0e, 0x02, 0x32, - 0x0e, 0xf9, 0xe8, 0x6d, 0xfe, 0x0a, 0x35, 0x98, 0xea, 0x0b, 0xab, 0x21, 0x24, 0x16, 0xd4, 0xf3, - 0x61, 0x6d, 0x51, 0x82, 0x8b, 0x6f, 0x2c, 0xa5, 0xd1, 0x3d, 0x58, 0xb0, 0x88, 0x6f, 0xd8, 0x4e, - 0xd0, 0x0e, 0xa6, 0x9e, 0x5e, 0x06, 0x60, 0xad, 0x40, 0xb5, 0x59, 0xe2, 0x3e, 0xc9, 0x0f, 0x1c, - 0x02, 0xf2, 0x8b, 0x67, 0x7a, 0x56, 0xf0, 0x93, 0x42, 0x2e, 0xba, 0x78, 0xbb, 0x9e, 0x45, 0xb0, - 0xe0, 0xe8, 0x8f, 0x34, 0x28, 0x05, 0x48, 0xbb, 0x46, 0x9f, 0x11, 0xb4, 0xa5, 0x56, 0x11, 0x1c, - 0x77, 0x58, 0x93, 0xe7, 0xdf, 0x1b, 0xf4, 0xc8, 0xf9, 0xb0, 0x56, 0x14, 0x62, 0xfc, 0x43, 0x2d, - 0x20, 0xb6, 0x47, 0x99, 0x4b, 0xf6, 0xe8, 0x35, 0xc8, 0x89, 0xd6, 0x5b, 0x6e, 0xa6, 0x6a, 0xf4, - 0x44, 0x7b, 0x8e, 0x03, 0x9e, 0xfe, 0x71, 0x06, 0xca, 0x89, 0xc5, 0xa5, 0xe8, 0xea, 0xd4, 0xa8, - 0x24, 0x93, 0x62, 0xfc, 0x36, 0xfd, 0x87, 0xa0, 0xef, 0x42, 0xde, 0xe4, 0xeb, 0x0b, 0x7f, 0x89, - 0xdb, 0x9a, 0xe5, 0x28, 0xc4, 0xce, 0x44, 0x91, 0x24, 0x3e, 0x19, 0x96, 0x80, 0xe8, 0x06, 0xac, - 0x52, 0xe2, 0xd3, 0xc1, 0xce, 0xb1, 0x4f, 0x68, 0xbc, 0xff, 0xcf, 0x45, 0x7d, 0x0f, 0x1e, 0x17, - 0xc0, 0x93, 0x3a, 0x61, 0xaa, 0xcc, 0xbf, 0x40, 0xaa, 0xd4, 0x1d, 0x98, 0xff, 0x2f, 0xf6, 0xe8, - 0xdf, 0x83, 0x62, 
0xd4, 0x45, 0x7d, 0xca, 0x26, 0xf5, 0x1f, 0x40, 0x81, 0x47, 0x63, 0xd8, 0xfd, - 0x5f, 0x52, 0x89, 0x92, 0x35, 0x22, 0x93, 0xa6, 0x46, 0xe8, 0x6f, 0x41, 0xf9, 0x4e, 0xcf, 0x9a, - 0xed, 0x57, 0x14, 0x7d, 0x1b, 0x82, 0x1f, 0x05, 0x79, 0x0a, 0x0e, 0x9e, 0xf9, 0xb1, 0x14, 0x1c, - 0x7f, 0xb3, 0x27, 0x7f, 0xaf, 0x01, 0xf1, 0xe6, 0xdc, 0x3b, 0x23, 0xae, 0xcf, 0x57, 0xc3, 0x8f, - 0x6d, 0x7c, 0x35, 0xe2, 0xee, 0x09, 0x0e, 0xba, 0x03, 0x79, 0x4f, 0xb4, 0x64, 0x72, 0xf0, 0x35, - 0xe3, 0x0c, 0x41, 0x85, 0x6a, 0xd0, 0xd7, 0x61, 0x09, 0xd6, 0xdc, 0x7c, 0xf2, 0xac, 0x3a, 0xf7, - 0xf4, 0x59, 0x75, 0xee, 0xa3, 0x67, 0xd5, 0xb9, 0x0f, 0x46, 0x55, 0xed, 0xc9, 0xa8, 0xaa, 0x3d, - 0x1d, 0x55, 0xb5, 0x8f, 0x46, 0x55, 0xed, 0xe3, 0x51, 0x55, 0x7b, 0xf4, 0x8f, 0xea, 0xdc, 0xbd, - 0xcc, 0xd9, 0xd6, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xab, 0xec, 0x02, 0x4a, 0x00, 0x21, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index eb3237f2..ba1194dc 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -92,6 +92,16 @@ message APIResource { // categories is a list of the grouped resources this resource belongs to (e.g. 'all') repeated string categories = 7; + + // The hash value of the storage version, the version this resource is + // converted to when written to the data store. Value must be treated + // as opaque by clients. Only equality comparison on the value is valid. + // This is an alpha feature and may change or be removed in the future. + // The field is populated by the apiserver only if the + // StorageVersionHash feature gate is enabled. + // This field will remain optional even if it graduates. + // +optional + optional string storageVersionHash = 10; } // APIResourceList is a list of APIResource, it is used to expose the name of the @@ -107,7 +117,7 @@ message APIResourceList { // APIVersions lists the versions that are available, to allow clients to // discover the API at /api, which is the root path of the legacy v1 API. -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { @@ -134,9 +144,12 @@ message CreateOptions { // +optional repeated string dryRun = 1; - // If IncludeUninitialized is specified, the object may be - // returned without completing initialization. - optional bool includeUninitialized = 2; + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + optional string fieldManager = 3; } // DeleteOptions may be provided when deleting an API object. @@ -150,6 +163,7 @@ message DeleteOptions { // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be // returned. + // +k8s:conversion-gen=false // +optional optional Preconditions preconditions = 2; @@ -188,14 +202,33 @@ message Duration { } // ExportOptions is the query options to the standard REST get call. +// Deprecated. Planned for removal in 1.18. message ExportOptions { // Should this value be exported. Export strips fields that a user can not specify. + // Deprecated. Planned for removal in 1.18. optional bool export = 1; // Should the export be exact. 
Exact export maintains cluster-specific fields like 'Namespace'. + // Deprecated. Planned for removal in 1.18. optional bool exact = 2; } +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:<name>', where <name> is the name of a field in a struct, or key in a map +// 'v:<value>', where <value> is the exact json formatted value of a list item +// 'i:<index>', where <index> is position of a item in a list +// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +message FieldsV1 { + // Raw is the underlying serialization of this object. + optional bytes Raw = 1; +} + // GetOptions is the standard query options to the standard REST get call. message GetOptions { // When specified: @@ -203,15 +236,11 @@ // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. optional string resourceVersion = 1; - - // If true, partially initialized resources are included in the response. - // +optional - optional bool includeUninitialized = 2; } // GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying // concepts during lookup stages without having partially valid types -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupKind { optional string group = 1; @@ -221,7 +250,7 @@ // GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying // concepts during lookup stages without having partially valid types -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupResource { optional string group = 1; @@ -230,7 +259,7 @@ } // GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersion { optional string group = 1; @@ -251,7 +280,7 @@ message GroupVersionForDiscovery { // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion // to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionKind { optional string group = 1; @@ -263,7 +292,7 @@ // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion // to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionResource { optional string group = 1; @@ -273,27 +302,6 @@ optional string resource = 3; } -// Initializer is information about an initializer that has not yet completed. -message Initializer { - // name of the process that is responsible for initializing this object. - optional string name = 1; -} - -// Initializers tracks the progress of initialization. -message Initializers { - // Pending is a list of initializers that must execute in order before this object is visible.
- // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - repeated Initializer pending = 1; - - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - optional Status result = 2; -} - // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. @@ -332,7 +340,7 @@ message LabelSelectorRequirement { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; @@ -346,6 +354,10 @@ message ListMeta { // selfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 1; @@ -354,7 +366,7 @@ message ListMeta { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 2; @@ -366,6 +378,18 @@ message ListMeta { // identical to the value in the first response, unless you have received this token from an error // message. optional string continue = 3; + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + optional int64 remainingItemCount = 4; } // ListOptions is the query options to a standard REST list call. @@ -380,15 +404,22 @@ message ListOptions { // +optional optional string fieldSelector = 2; - // If true, partially initialized resources are included in the response. - // +optional - optional bool includeUninitialized = 6; - // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional optional bool watch = 3; + // allowWatchBookmarks requests watch events with type "BOOKMARK". 
+ // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // +optional + optional bool allowWatchBookmarks = 9; + // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -411,7 +442,7 @@ message ListOptions { // more results are available. Servers may choose not to support the limit argument and will return // all of the available results. If limit is specified and the continue field is empty, clients may // assume that no more results are available. This field is not supported if watch is true. - // + // // The server guarantees that the objects returned when using continue will be identical to issuing // a single list call without a limit - that is, no objects created, modified, or deleted after the // first request is issued will be included in any subsequent continued requests. This is sometimes @@ -432,14 +463,43 @@ message ListOptions { // a list starting from the next key, but from the latest snapshot, which is inconsistent from the // previous list results - objects that are created, modified, or deleted after the first list request // will be included in the response, as long as their keys are after the "next key". - // + // // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; } +// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource +// that the fieldset applies to. +message ManagedFieldsEntry { + // Manager is an identifier of the workflow managing these fields. + optional string manager = 1; + + // Operation is the type of operation which lead to this ManagedFieldsEntry being created. + // The only valid values for this field are 'Apply' and 'Update'. + optional string operation = 2; + + // APIVersion defines the version of this resource that this field set + // applies to. The format is "group/version" just like the top-level + // APIVersion field. It is necessary to track the version of a field + // set because it cannot be automatically converted. + optional string apiVersion = 3; + + // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' + // +optional + optional Time time = 4; + + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + optional string fieldsType = 6; + + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. + // +optional + optional FieldsV1 fieldsV1 = 7; +} + // MicroTime is version of Time with microsecond level precision. -// +// // +protobuf.options.marshal=false // +protobuf.as=Timestamp // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -475,14 +535,14 @@ message ObjectMeta { // The provided value has the same validation rules as the Name field, // and may be truncated by the length of the suffix required to make the value // unique on the server. 
- // + // // If this field is specified and the generated name exists, the server will // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason // ServerTimeout indicating a unique name could not be found in the time allotted, and the client // should retry (optionally after the time indicated in the Retry-After header). - // + // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional optional string generateName = 2; @@ -490,7 +550,7 @@ message ObjectMeta { // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. - // + // // Must be a DNS_LABEL. // Cannot be updated. // More info: http://kubernetes.io/docs/user-guide/namespaces @@ -500,13 +560,17 @@ message ObjectMeta { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 4; // UID is the unique in time and space value for this object. It is typically generated by // the server on successful creation of a resource and is not allowed to change on PUT // operations. - // + // // Populated by the system. // Read-only. // More info: http://kubernetes.io/docs/user-guide/identifiers#uids @@ -518,11 +582,11 @@ message ObjectMeta { // concurrency, change detection, and the watch operation on a resource or set of resources. // Clients must treat these values as opaque and passed unmodified back to the server. // They may only be valid for a particular resource or set of resources. - // + // // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -534,11 +598,11 @@ message ObjectMeta { // CreationTimestamp is a timestamp representing the server time when this object was // created. It is not guaranteed to be set in happens-before order across separate operations. // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // + // // Populated by the system. // Read-only. // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time creationTimestamp = 8; @@ -556,10 +620,10 @@ message ObjectMeta { // exist after this timestamp, until an administrator or automated process can determine the // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. - // + // // Populated by the system when a graceful deletion is requested. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time deletionTimestamp = 9; @@ -593,21 +657,19 @@ message ObjectMeta { // +patchStrategy=merge repeated OwnerReference ownerReferences = 13; - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - optional Initializers initializers = 16; - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. + // Finalizers may be processed and removed in any order. Order is NOT enforced + // because it introduces significant risk of stuck finalizers. + // finalizers is a shared field, any actor with permission can reorder it. + // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge repeated string finalizers = 14; @@ -617,17 +679,28 @@ message ObjectMeta { // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. // +optional optional string clusterName = 15; + + // ManagedFields maps workflow-id and version to the set of fields + // that are managed by that workflow. This is mostly for internal + // housekeeping, and users typically shouldn't need to set or + // understand this field. A workflow can be the user's name, a + // controller's name, or the name of a specific apply path like + // "ci-cd". The set of fields is always in the version that the + // workflow used when modifying the object. + // + // +optional + repeated ManagedFieldsEntry managedFields = 17; } // OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. message OwnerReference { // API version of the referent. optional string apiVersion = 5; // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; // Name of the referent. 
@@ -652,15 +725,69 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadata { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional ObjectMeta metadata = 1; +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // items contains each of the included items. + repeated PartialObjectMetadata items = 2; +} + // Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. message Patch { } +// PatchOptions may be provided when patching an API object. +// PatchOptions is meant to be a superset of UpdateOptions. +message PatchOptions { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 1; + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + optional bool force = 2; + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. This + // field is required for apply requests + // (application/apply-patch) but optional for non-apply patch + // types (JsonPatch, MergePatch, StrategicMergePatch). + // +optional + optional string fieldManager = 3; +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. message Preconditions { // Specifies the target UID. // +optional optional string uid = 1; + + // Specifies the target ResourceVersion + // +optional + optional string resourceVersion = 2; } // RootPaths lists the paths available at root. @@ -683,13 +810,13 @@ message ServerAddressByClientCIDR { // Status is a return value for calls that don't return other objects. message Status { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional string status = 2; @@ -734,7 +861,7 @@ message StatusCause { // Arrays are zero-indexed. 
Fields may appear more than once in an array of // causes due to fields having multiple errors. // Optional. - // + // // Examples: // "name" - the field "name" on the current resource // "items[0].name" - the field "name" on the first array entry in "items" @@ -760,7 +887,7 @@ message StatusDetails { // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 3; @@ -782,10 +909,20 @@ message StatusDetails { optional int32 retryAfterSeconds = 5; } +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message TableOptions { + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + optional string includeObject = 1; +} + // Time is a wrapper around time.Time which supports correct // marshaling to YAML and JSON. Wrappers are provided for many // of the factory methods that the time package offers. -// +// // +protobuf.options.marshal=false // +protobuf.as=Timestamp // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -821,26 +958,27 @@ message Timestamp { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. -// +// // +k8s:deepcopy-gen=false message TypeMeta { // Kind is a string value representing the REST resource this object represents. // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 1; // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional optional string apiVersion = 2; } // UpdateOptions may be provided when updating an API object. +// All fields in UpdateOptions should also be present in PatchOptions. message UpdateOptions { // When present, indicates that modifications should not be // persisted. An invalid or unrecognized dryRun directive will @@ -849,10 +987,17 @@ message UpdateOptions { // - All: all dry run stages will be processed // +optional repeated string dryRun = 1; + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. 
+ // +optional + optional string fieldManager = 2; } // Verbs masks the value so protobuf can generate -// +// // +protobuf.nullable=true // +protobuf.options.(gogoproto.goproto_stringer)=false message Verbs { @@ -862,7 +1007,7 @@ message Verbs { } // Event represents a single event to a watched resource. -// +// // +protobuf=true // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index d845d7b0..ec016fd3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -17,6 +17,9 @@ limitations under the License. package v1 import ( + "bytes" + "encoding/json" + "errors" "fmt" "k8s.io/apimachinery/pkg/fields" @@ -227,8 +230,51 @@ func NewUIDPreconditions(uid string) *Preconditions { return &Preconditions{UID: &u} } +// NewRVDeletionPrecondition returns a DeleteOptions with a ResourceVersion precondition set. +func NewRVDeletionPrecondition(rv string) *DeleteOptions { + p := Preconditions{ResourceVersion: &rv} + return &DeleteOptions{Preconditions: &p} +} + // HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. func HasObjectMetaSystemFieldValues(meta Object) bool { return !meta.GetCreationTimestamp().Time.IsZero() || len(meta.GetUID()) != 0 } + +// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields +// for a pre-existing object. This is opt-in for new objects with Status subresource. +func ResetObjectMetaForStatus(meta, existingMeta Object) { + meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp()) + meta.SetGeneration(existingMeta.GetGeneration()) + meta.SetSelfLink(existingMeta.GetSelfLink()) + meta.SetLabels(existingMeta.GetLabels()) + meta.SetAnnotations(existingMeta.GetAnnotations()) + meta.SetFinalizers(existingMeta.GetFinalizers()) + meta.SetOwnerReferences(existingMeta.GetOwnerReferences()) + meta.SetManagedFields(existingMeta.GetManagedFields()) +} + +// MarshalJSON implements json.Marshaler +// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (f FieldsV1) MarshalJSON() ([]byte, error) { + if f.Raw == nil { + return []byte("null"), nil + } + return f.Raw, nil +} + +// UnmarshalJSON implements json.Unmarshaler +func (f *FieldsV1) UnmarshalJSON(b []byte) error { + if f == nil { + return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + } + if !bytes.Equal(b, []byte("null")) { + f.Raw = append(f.Raw[0:0], b...) 
+ } + return nil +} + +var _ json.Marshaler = FieldsV1{} +var _ json.Unmarshaler = &FieldsV1{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index ee144754..912cf203 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -55,14 +55,14 @@ type Object interface { SetLabels(labels map[string]string) GetAnnotations() map[string]string SetAnnotations(annotations map[string]string) - GetInitializers() *Initializers - SetInitializers(initializers *Initializers) GetFinalizers() []string SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference SetOwnerReferences([]OwnerReference) GetClusterName() string SetClusterName(clusterName string) + GetManagedFields() []ManagedFieldsEntry + SetManagedFields(managedFields []ManagedFieldsEntry) } // ListMetaAccessor retrieves the list interface from an object @@ -92,6 +92,8 @@ type ListInterface interface { SetSelfLink(selfLink string) GetContinue() string SetContinue(c string) + GetRemainingItemCount() *int64 + SetRemainingItemCount(c *int64) } // Type exposes the type and APIVersion of versioned or internal API objects. @@ -103,12 +105,16 @@ type Type interface { SetKind(kind string) } +var _ ListInterface = &ListMeta{} + func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } func (meta *ListMeta) GetContinue() string { return meta.Continue } func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } +func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount } +func (meta *ListMeta) SetRemainingItemCount(c *int64) { meta.RemainingItemCount = c } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } @@ -158,13 +164,15 @@ func (meta *ObjectMeta) GetLabels() map[string]string { return m func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } -func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { meta.OwnerReferences = references } -func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } -func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } +func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } +func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } +func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields } +func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) { + meta.ManagedFields = managedFields +} diff --git 
a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 6f6c5111..cdd9a6a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -41,11 +41,6 @@ func (t *MicroTime) DeepCopyInto(out *MicroTime) { *out = *t } -// String returns the representation of the time. -func (t MicroTime) String() string { - return t.Time.String() -} - // NewMicroTime returns a wrapped instance of the provided time func NewMicroTime(time time.Time) MicroTime { return MicroTime{time} @@ -72,22 +67,40 @@ func (t *MicroTime) IsZero() bool { // Before reports whether the time instant t is before u. func (t *MicroTime) Before(u *MicroTime) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // Equal reports whether the time instant t is equal to u. func (t *MicroTime) Equal(u *MicroTime) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // BeforeTime reports whether the time instant t is before second-lever precision u. func (t *MicroTime) BeforeTime(u *Time) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // EqualTime reports whether the time instant t is equal to second-lever precision u. func (t *MicroTime) EqualTime(u *Time) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // UnixMicro returns the local time corresponding to the given Unix time diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go index 14841be5..6dd6d899 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -70,3 +70,11 @@ func (m *MicroTime) MarshalTo(data []byte) (int, error) { } return m.ProtoMicroTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf marshalling interface. +func (m *MicroTime) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 0827729d..a7b8aa34 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -25,6 +25,13 @@ import ( // GroupName is the group name for this API. const GroupName = "meta.k8s.io" +var ( + // localSchemeBuilder is used to make compiler happy for autogenerated + // conversions. However, it's not used. + schemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &schemeBuilder +) + // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} @@ -40,6 +47,31 @@ func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. 
+var ParameterCodec = runtime.NewParameterCodec(scheme) + +func addEventConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + Convert_v1_WatchEvent_To_watch_Event, + Convert_v1_InternalEvent_To_v1_WatchEvent, + Convert_watch_Event_To_v1_WatchEvent, + Convert_v1_WatchEvent_To_v1_InternalEvent, + ) +} + +var optionsTypes = []runtime.Object{ + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + &CreateOptions{}, + &UpdateOptions{}, + &PatchOptions{}, +} + // AddToGroupVersion registers common meta types into schemas. func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) @@ -48,20 +80,7 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &InternalEvent{}, ) // Supports legacy code paths, most callers should use metav1.ParameterCodec for now - scheme.AddKnownTypes(groupVersion, - &ListOptions{}, - &ExportOptions{}, - &GetOptions{}, - &DeleteOptions{}, - &CreateOptions{}, - &UpdateOptions{}, - ) - utilruntime.Must(scheme.AddConversionFuncs( - Convert_v1_WatchEvent_To_watch_Event, - Convert_v1_InternalEvent_To_v1_WatchEvent, - Convert_watch_Event_To_v1_WatchEvent, - Convert_v1_WatchEvent_To_v1_InternalEvent, - )) + scheme.AddKnownTypes(groupVersion, optionsTypes...) // Register Unversioned types under their own special group scheme.AddUnversionedTypes(Unversioned, &Status{}, @@ -71,26 +90,31 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &APIResourceList{}, ) + utilruntime.Must(addEventConversionFuncs(scheme)) + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. utilruntime.Must(AddConversionFuncs(scheme)) utilruntime.Must(RegisterDefaults(scheme)) } -// scheme is the registry for the common types that adhere to the meta v1 API spec. -var scheme = runtime.NewScheme() +// AddMetaToScheme registers base meta types into schemas. +func AddMetaToScheme(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) -// ParameterCodec knows about query parameters used with the meta v1 API spec. -var ParameterCodec = runtime.NewParameterCodec(scheme) + return scheme.AddConversionFuncs( + Convert_Slice_string_To_v1_IncludeObjectPolicy, + ) +} func init() { - scheme.AddUnversionedTypes(SchemeGroupVersion, - &ListOptions{}, - &ExportOptions{}, - &GetOptions{}, - &DeleteOptions{}, - &CreateOptions{}, - &UpdateOptions{}, - ) + scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...) + + utilruntime.Must(AddMetaToScheme(scheme)) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. utilruntime.Must(RegisterDefaults(scheme)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index efff656e..fe510ed9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -20,7 +20,7 @@ import ( "encoding/json" "time" - "github.com/google/gofuzz" + fuzz "github.com/google/gofuzz" ) // Time is a wrapper around time.Time which supports correct @@ -41,11 +41,6 @@ func (t *Time) DeepCopyInto(out *Time) { *out = *t } -// String returns the representation of the time. 
-func (t Time) String() string { - return t.Time.String() -} - // NewTime returns a wrapped instance of the provided time func NewTime(time time.Time) Time { return Time{time} @@ -72,7 +67,10 @@ func (t *Time) IsZero() bool { // Before reports whether the time instant t is before u. func (t *Time) Before(u *Time) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // Equal reports whether the time instant t is equal to u. @@ -147,8 +145,12 @@ func (t Time) MarshalJSON() ([]byte, error) { // Encode unset/nil objects as JSON's "null". return []byte("null"), nil } - - return json.Marshal(t.UTC().Format(time.RFC3339)) + buf := make([]byte, 0, len(time.RFC3339)+2) + buf = append(buf, '"') + // time cannot contain non escapable JSON characters + buf = t.UTC().AppendFormat(buf, time.RFC3339) + buf = append(buf, '"') + return buf, nil } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index ed72186b..eac8d965 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -75,7 +75,7 @@ func (m *Time) Unmarshal(data []byte) error { return nil } -// Marshal implements the protobuf marshalling interface. +// Marshal implements the protobuf marshaling interface. func (m *Time) Marshal() (data []byte, err error) { if m == nil || m.Time.IsZero() { return nil, nil @@ -83,10 +83,18 @@ func (m *Time) Marshal() (data []byte, err error) { return m.ProtoTime().Marshal() } -// MarshalTo implements the protobuf marshalling interface. +// MarshalTo implements the protobuf marshaling interface. func (m *Time) MarshalTo(data []byte) (int, error) { if m == nil || m.Time.IsZero() { return 0, nil } return m.ProtoTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf reverse marshaling interface. +func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 4d3a55d7..bf125b62 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -43,14 +43,14 @@ type TypeMeta struct { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } @@ -61,6 +61,10 @@ type ListMeta struct { // selfLink is a URL representing this object. // Populated by the system. // Read-only. 
+ // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` @@ -69,7 +73,7 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` @@ -81,6 +85,18 @@ type ListMeta struct { // identical to the value in the first response, unless you have received this token from an error // message. Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"` + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"` } // These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here @@ -115,7 +131,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` @@ -133,6 +149,10 @@ type ObjectMeta struct { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` @@ -155,7 +175,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -171,7 +191,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` @@ -192,7 +212,7 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` @@ -226,21 +246,19 @@ type ObjectMeta struct { // +patchStrategy=merge OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. + // Finalizers may be processed and removed in any order. Order is NOT enforced + // because it introduces significant risk of stuck finalizers. + // finalizers is a shared field, any actor with permission can reorder it. + // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` @@ -250,26 +268,17 @@ type ObjectMeta struct { // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. // +optional ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` -} -// Initializers tracks the progress of initialization. -type Initializers struct { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. 
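Because the new finalizer comment stresses that ordering is not enforced, a controller should treat the list as an unordered set. A small sketch (the finalizer name is assumed, not part of the patch):

    package demo

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // cleanupFinalizer is an illustrative, assumed finalizer name.
    const cleanupFinalizer = "example.com/cleanup"

    // ensureFinalizer adds the finalizer if it is not already present.
    func ensureFinalizer(meta *metav1.ObjectMeta) {
        for _, f := range meta.Finalizers {
            if f == cleanupFinalizer {
                return
            }
        }
        meta.Finalizers = append(meta.Finalizers, cleanupFinalizer)
    }

    // removeFinalizer drops the finalizer wherever it appears; its position in
    // the list carries no meaning, so no ordering is assumed.
    func removeFinalizer(meta *metav1.ObjectMeta) {
        kept := meta.Finalizers[:0]
        for _, f := range meta.Finalizers {
            if f != cleanupFinalizer {
                kept = append(kept, f)
            }
        }
        meta.Finalizers = kept
    }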
- // +patchMergeKey=name - // +patchStrategy=merge - Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` -} - -// Initializer is information about an initializer that has not yet completed. -type Initializer struct { - // name of the process that is responsible for initializing this object. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // ManagedFields maps workflow-id and version to the set of fields + // that are managed by that workflow. This is mostly for internal + // housekeeping, and users typically shouldn't need to set or + // understand this field. A workflow can be the user's name, a + // controller's name, or the name of a specific apply path like + // "ci-cd". The set of fields is always in the version that the + // workflow used when modifying the object. + // + // +optional + ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } const ( @@ -286,13 +295,13 @@ const ( ) // OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. // More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -313,6 +322,7 @@ type OwnerReference struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ListOptions is the query options to a standard REST list call. @@ -327,13 +337,24 @@ type ListOptions struct { // Defaults to everything. // +optional FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` - // If true, partially initialized resources are included in the response. - // +optional - IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"` + + // +k8s:deprecated=includeUninitialized,protobuf=6 + // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. 
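A short sketch (not part of the patch) of how the new AllowWatchBookmarks field sits alongside the existing selector and watch fields; the selector value is illustrative, and servers may ignore the bookmark request entirely.

    package demo

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // watchOptions builds ListOptions for a label-filtered watch that also asks
    // for best-effort BOOKMARK events.
    func watchOptions() metav1.ListOptions {
        return metav1.ListOptions{
            LabelSelector:       "app=demo",
            Watch:               true,
            AllowWatchBookmarks: true, // ignored by servers without the WatchBookmarks gate
        }
    }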
+ // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // +optional + AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -381,17 +402,22 @@ type ListOptions struct { Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ExportOptions is the query options to the standard REST get call. +// Deprecated. Planned for removal in 1.18. type ExportOptions struct { TypeMeta `json:",inline"` // Should this value be exported. Export strips fields that a user can not specify. + // Deprecated. Planned for removal in 1.18. Export bool `json:"export" protobuf:"varint,1,opt,name=export"` // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + // Deprecated. Planned for removal in 1.18. Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // GetOptions is the standard query options to the standard REST get call. @@ -402,9 +428,7 @@ type GetOptions struct { // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"` - // If true, partially initialized resources are included in the response. - // +optional - IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` + // +k8s:deprecated=includeUninitialized,protobuf=2 } // DeletionPropagation decides if a deletion will propagate to the dependents of @@ -431,6 +455,7 @@ const ( DryRunAll = "All" ) +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DeleteOptions may be provided when deleting an API object. @@ -446,6 +471,7 @@ type DeleteOptions struct { // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be // returned. + // +k8s:conversion-gen=false // +optional Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` @@ -476,12 +502,38 @@ type DeleteOptions struct { DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CreateOptions may be provided when creating an API object. type CreateOptions struct { TypeMeta `json:",inline"` + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. 
Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + // +k8s:deprecated=includeUninitialized,protobuf=2 + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PatchOptions may be provided when patching an API object. +// PatchOptions is meant to be a superset of UpdateOptions. +type PatchOptions struct { + TypeMeta `json:",inline"` + // When present, indicates that modifications should not be // persisted. An invalid or unrecognized dryRun directive will // result in an error response and no further processing of the @@ -490,14 +542,28 @@ type CreateOptions struct { // +optional DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` - // If IncludeUninitialized is specified, the object may be - // returned without completing initialization. - IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool `json:"force,omitempty" protobuf:"varint,2,opt,name=force"` + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. This + // field is required for apply requests + // (application/apply-patch) but optional for non-apply patch + // types (JsonPatch, MergePatch, StrategicMergePatch). + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // UpdateOptions may be provided when updating an API object. +// All fields in UpdateOptions should also be present in PatchOptions. type UpdateOptions struct { TypeMeta `json:",inline"` @@ -508,6 +574,13 @@ type UpdateOptions struct { // - All: all dry run stages will be processed // +optional DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"` } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. @@ -515,6 +588,9 @@ type Preconditions struct { // Specifies the target UID. 
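To make the relationship between these option types concrete, a sketch (not part of the patch; the manager name is assumed) constructing dry-run create options and a forced server-side-apply PatchOptions:

    package demo

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // applyOptions returns PatchOptions suitable for a server-side apply request:
    // Force re-acquires fields owned by other managers, and FieldManager names
    // this workflow in managedFields.
    func applyOptions() metav1.PatchOptions {
        force := true
        return metav1.PatchOptions{
            FieldManager: "example-controller",
            Force:        &force, // must be left unset for non-apply patch types
        }
    }

    // dryRunCreate returns CreateOptions that exercise every dry-run stage
    // without persisting the object.
    func dryRunCreate() metav1.CreateOptions {
        return metav1.CreateOptions{
            DryRun:       []string{metav1.DryRunAll},
            FieldManager: "example-controller",
        }
    }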
// +optional UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // Specifies the target ResourceVersion + // +optional + ResourceVersion *string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -523,13 +599,13 @@ type Preconditions struct { type Status struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. @@ -568,7 +644,7 @@ type StatusDetails struct { Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. @@ -700,11 +776,13 @@ const ( // doesn't make any sense, for example deleting a read-only object. This is different than // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the // data was invalid. API calls that return BadRequest can never succeed. + // Status code 400 StatusReasonBadRequest StatusReason = "BadRequest" // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the // resource was not supported by the code - for instance, attempting to delete a resource that // can only be created. API calls that return MethodNotAllowed can never succeed. + // Status code 405 StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable @@ -713,6 +791,10 @@ const ( // Status code 406 StatusReasonNotAcceptable StatusReason = "NotAcceptable" + // StatusReasonRequestEntityTooLarge means that the request entity is too large. + // Status code 413 + StatusReasonRequestEntityTooLarge StatusReason = "RequestEntityTooLarge" + // StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable // to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml. // API calls that return UnsupportedMediaType can never succeed. @@ -788,6 +870,9 @@ const ( // without the expected return type. The presence of this cause indicates the error may be // due to an intervening proxy or the server software malfunctioning. 
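A sketch (not part of the patch; the UID and resourceVersion values are caller-supplied placeholders) showing how the new ResourceVersion precondition combines with the existing UID check on delete:

    package demo

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
    )

    // guardedDeleteOptions only lets the delete proceed if both the UID and the
    // resourceVersion still match, guarding against deleting a recreated or
    // concurrently modified object.
    func guardedDeleteOptions(uid types.UID, resourceVersion string) *metav1.DeleteOptions {
        return &metav1.DeleteOptions{
            Preconditions: &metav1.Preconditions{
                UID:             &uid,
                ResourceVersion: &resourceVersion,
            },
        }
    }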
CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" + // FieldManagerConflict is used to report when another client claims to manage this field, + // It should only be returned for a request using server-side apply. + CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -796,7 +881,7 @@ const ( type List struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -902,6 +987,15 @@ type APIResource struct { ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` // categories is a list of the grouped resources this resource belongs to (e.g. 'all') Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` + // The hash value of the storage version, the version this resource is + // converted to when written to the data store. Value must be treated + // as opaque by clients. Only equality comparison on the value is valid. + // This is an alpha feature and may change or be removed in the future. + // The field is populated by the apiserver only if the + // StorageVersionHash feature gate is enabled. + // This field will remain optional even if it graduates. + // +optional + StorageVersionHash string `json:"storageVersionHash,omitempty" protobuf:"bytes,10,opt,name=storageVersionHash"` } // Verbs masks the value so protobuf can generate @@ -1003,3 +1097,217 @@ const ( LabelSelectorOpExists LabelSelectorOperator = "Exists" LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) + +// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource +// that the fieldset applies to. +type ManagedFieldsEntry struct { + // Manager is an identifier of the workflow managing these fields. + Manager string `json:"manager,omitempty" protobuf:"bytes,1,opt,name=manager"` + // Operation is the type of operation which lead to this ManagedFieldsEntry being created. + // The only valid values for this field are 'Apply' and 'Update'. + Operation ManagedFieldsOperationType `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=ManagedFieldsOperationType"` + // APIVersion defines the version of this resource that this field set + // applies to. The format is "group/version" just like the top-level + // APIVersion field. It is necessary to track the version of a field + // set because it cannot be automatically converted. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` + // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' + // +optional + Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` + + // Fields is tombstoned to show why 5 is a reserved protobuf tag. + //Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + FieldsType string `json:"fieldsType,omitempty" protobuf:"bytes,6,opt,name=fieldsType"` + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. 
+ // +optional + FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"` +} + +// ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. +type ManagedFieldsOperationType string + +const ( + ManagedFieldsOperationApply ManagedFieldsOperationType = "Apply" + ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" +) + +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:<name>', where <name> is the name of a field in a struct, or key in a map +// 'v:<value>', where <value> is the exact json formatted value of a list item +// 'i:<index>', where <index> is position of a item in a list +// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +type FieldsV1 struct { + // Raw is the underlying serialization of this object. + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"` +} + +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. This can be introduced +// in a v1 because clients a) receive an error if they try to access proto today, and b) +// once introduced they would be able to gracefully switch over to using it. + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=false +type Table struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions. + ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` +} + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column, such as number, integer, string, or + // array. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type modifier for this column. A format modifies the type and + // imposes additional rules, like date or time formatting for a string. The 'name' format is applied + // to the primary identifier column which has type 'string' to assist in clients identifying column + // is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others.
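As an aside (not part of the patch), a sketch of reading the managedFields entries back from an object's metadata; the field names come straight from the types above, and the object being inspected is assumed to exist:

    package demo

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // printManagers lists which workflow touched the object, how, and at which
    // API version, using the ManagedFields entries stored in ObjectMeta.
    func printManagers(meta metav1.ObjectMeta) {
        for _, entry := range meta.ManagedFields {
            fmt.Printf("%s (%s) at %s\n", entry.Manager, entry.Operation, entry.APIVersion)
            if entry.FieldsV1 != nil {
                fmt.Printf("  fields: %s\n", string(entry.FieldsV1.Raw))
            }
        }
    }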
Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority"` +} + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow struct { + // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or + // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a + // more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user. These conditions + // apply to the row, not to the object, and will be specific to table output. The only defined + // condition type is 'Completed', for a row that indicates a resource that has run to completion and + // can be given less visual priority. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // The media type of the object will always match the enclosing list - if this as a JSON table, these + // will be JSON encoded objects. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition struct { + // Type of row condition. The only defined value is 'Completed' indicating that the + // object this row represents has reached a completed state and may be given less visual + // priority than other rows. Clients are not required to honor any conditions but should + // be consistent where possible about handling the conditions. + Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +type RowConditionType string + +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// included by other resources. +const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. + RowCompleted RowConditionType = "Completed" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. 
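For orientation (a sketch, not part of the patch; the column and cell values are made up), the Table types above compose like this:

    package demo

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // demoTable builds a one-column, one-row Table of the kind a server would
    // return when a client asks for tabular output.
    func demoTable() *metav1.Table {
        return &metav1.Table{
            ColumnDefinitions: []metav1.TableColumnDefinition{
                {Name: "Name", Type: "string", Format: "name", Description: "object name", Priority: 0},
            },
            Rows: []metav1.TableRow{
                {Cells: []interface{}{"demo"}},
            },
        }
    }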
+ IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. + IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) + +// TableOptions are used when a Table is requested by the caller. +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions struct { + TypeMeta `json:",inline"` + + // NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions + // and may be removed as a field in a future release. + NoHeaders bool `json:"-"` + + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata struct { + TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items contains each of the included items. + Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 35e800f8..b62e591e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -49,16 +49,17 @@ func (APIGroupList) SwaggerDoc() map[string]string { } var map_APIResource = map[string]string{ - "": "APIResource specifies the name of a resource and whether it is namespaced.", - "name": "name is the plural name of the resource.", - "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", - "namespaced": "namespaced indicates if a resource is namespaced or not.", - "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. 
For subresources, this may have a different value, for example: Scale\".", - "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", - "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", - "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", - "shortNames": "shortNames is a list of suggested short names of the resource.", - "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the plural name of the resource.", + "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", + "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "storageVersionHash": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", } func (APIResource) SwaggerDoc() map[string]string { @@ -86,9 +87,9 @@ func (APIVersions) SwaggerDoc() map[string]string { } var map_CreateOptions = map[string]string{ - "": "CreateOptions may be provided when creating an API object.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "includeUninitialized": "If IncludeUninitialized is specified, the object may be returned without completing initialization.", + "": "CreateOptions may be provided when creating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -109,19 +110,26 @@ func (DeleteOptions) SwaggerDoc() map[string]string { } var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call.", - "export": "Should this value be exported. Export strips fields that a user can not specify.", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", + "": "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.", + "export": "Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.", + "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.", } func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } +var map_FieldsV1 = map[string]string{ + "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", +} + +func (FieldsV1) SwaggerDoc() map[string]string { + return map_FieldsV1 +} + var map_GetOptions = map[string]string{ - "": "GetOptions is the standard query options to the standard REST get call.", - "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "includeUninitialized": "If true, partially initialized resources are included in the response.", + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", } func (GetOptions) SwaggerDoc() map[string]string { @@ -138,25 +146,6 @@ func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { return map_GroupVersionForDiscovery } -var map_Initializer = map[string]string{ - "": "Initializer is information about an initializer that has not yet completed.", - "name": "name of the process that is responsible for initializing this object.", -} - -func (Initializer) SwaggerDoc() map[string]string { - return map_Initializer -} - -var map_Initializers = map[string]string{ - "": "Initializers tracks the progress of initialization.", - "pending": "Pending is a list of initializers
that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", - "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", -} - -func (Initializers) SwaggerDoc() map[string]string { - return map_Initializers -} - var map_LabelSelector = map[string]string{ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", @@ -180,7 +169,7 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_List = map[string]string{ "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of objects", } @@ -189,10 +178,11 @@ func (List) SwaggerDoc() map[string]string { } var map_ListMeta = map[string]string{ - "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", - "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. 
Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -200,39 +190,53 @@ func (ListMeta) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "includeUninitialized": "If true, partially initialized resources are included in the response.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { return map_ListOptions } +var map_ManagedFieldsEntry = map[string]string{ + "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + "manager": "Manager is an identifier of the workflow managing these fields.", + "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + "time": "Time is timestamp of when these fields were set. 
It should always be empty if Operation is 'Apply'", + "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", +} + +func (ManagedFieldsEntry) SwaggerDoc() map[string]string { + return map_ManagedFieldsEntry +} + var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. 
Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. 
Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. 
Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.", - "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", + "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -240,9 +244,9 @@ func (ObjectMeta) SwaggerDoc() map[string]string { } var map_OwnerReference = map[string]string{ - "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", + "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "controller": "If true, this reference points to the managing controller.", @@ -253,6 +257,25 @@ func (OwnerReference) SwaggerDoc() map[string]string { return map_OwnerReference } +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. 
It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + var map_Patch = map[string]string{ "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", } @@ -261,9 +284,21 @@ func (Patch) SwaggerDoc() map[string]string { return map_Patch } +var map_PatchOptions = map[string]string{ + "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", +} + +func (PatchOptions) SwaggerDoc() map[string]string { + return map_PatchOptions +} + var map_Preconditions = map[string]string{ - "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", - "uid": "Specifies the target UID.", + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", + "resourceVersion": "Specifies the target ResourceVersion", } func (Preconditions) SwaggerDoc() map[string]string { @@ -291,8 +326,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -318,7 +353,7 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", @@ -328,10 +363,66 @@ func (StatusDetails) SwaggerDoc() map[string]string { return map_StatusDetails } +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column, such as number, integer, string, or array. 
See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. 
Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { @@ -339,8 +430,9 @@ func (TypeMeta) SwaggerDoc() map[string]string { } var map_UpdateOptions = map[string]string{ - "": "UpdateOptions may be provided when updating an API object.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index fc138e75..4244b8a6 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -27,11 +27,15 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog" ) // NestedFieldCopy returns a deep copy of the value of a nested field. // Returns false if the value is missing. // No error is returned for a nil field. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { val, found, err := NestedFieldNoCopy(obj, fields...) if !found || err != nil { @@ -43,10 +47,16 @@ func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, // NestedFieldNoCopy returns a reference to a nested field. // Returns false if value is not found and an error if unable // to traverse obj. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj for i, field := range fields { + if val == nil { + return nil, false, nil + } if m, ok := val.(map[string]interface{}); ok { val, ok = m[field] if !ok { @@ -272,6 +282,22 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } +func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { + val, found, err := NestedInt64(obj, fields...) + if !found || err != nil { + return 0 + } + return val +} + +func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { + val, found, err := NestedInt64(obj, fields...) + if !found || err != nil { + return nil + } + return &val +} + func jsonPath(fields []string) string { return "." + strings.Join(fields, ".") } @@ -304,6 +330,8 @@ var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} type unstructuredJSONScheme struct{} +const unstructuredJSONSchemeIdentifier runtime.Identifier = "unstructuredJSON" + func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { var err error if obj != nil { @@ -324,7 +352,14 @@ func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, return obj, &gvk, nil } -func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { +func (s unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case *Unstructured: return json.NewEncoder(w).Encode(t.Object) @@ -348,6 +383,11 @@ func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { } } +// Identifier implements runtime.Encoder interface. 
+func (unstructuredJSONScheme) Identifier() runtime.Identifier { + return unstructuredJSONSchemeIdentifier +} + func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { type detector struct { Items gojson.RawMessage @@ -375,12 +415,6 @@ func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) erro return s.decodeToUnstructured(data, x) case *UnstructuredList: return s.decodeToList(data, x) - case *runtime.VersionedObjects: - o, err := s.decode(data) - if err == nil { - x.Objects = []runtime.Object{o} - } - return err default: return json.Unmarshal(data, x) } @@ -435,12 +469,30 @@ func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList return nil } -type JSONFallbackEncoder struct { - runtime.Encoder +type jsonFallbackEncoder struct { + encoder runtime.Encoder + identifier runtime.Identifier } -func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { - err := c.Encoder.Encode(obj, w) +func NewJSONFallbackEncoder(encoder runtime.Encoder) runtime.Encoder { + result := map[string]string{ + "name": "fallback", + "base": string(encoder.Identifier()), + } + identifier, err := gojson.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for jsonFallbackEncoder: %v", err) + } + return &jsonFallbackEncoder{ + encoder: encoder, + identifier: runtime.Identifier(identifier), + } +} + +func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { + // There is no need to handle runtime.CacheableObject, as we only + // fallback to other encoders here. + err := c.encoder.Encode(obj, w) if runtime.IsNotRegisteredError(err) { switch obj.(type) { case *Unstructured, *UnstructuredList: @@ -449,3 +501,8 @@ func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { } return err } + +// Identifier implements runtime.Encoder interface. +func (c *jsonFallbackEncoder) Identifier() runtime.Identifier { + return c.identifier +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 781469ec..d1903394 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -47,6 +47,7 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} +var _ metav1.ListInterface = &Unstructured{} func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } @@ -126,6 +127,16 @@ func (u *Unstructured) UnmarshalJSON(b []byte) error { return err } +// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. +// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. +func (in *Unstructured) NewEmptyInstance() runtime.Unstructured { + out := new(Unstructured) + if in != nil { + out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind()) + } + return out +} + func (in *Unstructured) DeepCopy() *Unstructured { if in == nil { return nil @@ -143,13 +154,20 @@ func (u *Unstructured) setNestedField(value interface{}, fields ...string) { SetNestedField(u.Object, value, fields...) 
} -func (u *Unstructured) setNestedSlice(value []string, fields ...string) { +func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } SetNestedStringSlice(u.Object, value, fields...) } +func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedSlice(u.Object, value, fields...) +} + func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) @@ -312,6 +330,18 @@ func (u *Unstructured) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } +func (u *Unstructured) GetRemainingItemCount() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") +} + +func (u *Unstructured) SetRemainingItemCount(c *int64) { + if c == nil { + RemoveNestedField(u.Object, "metadata", "remainingItemCount") + } else { + u.setNestedField(*c, "metadata", "remainingItemCount") + } +} + func (u *Unstructured) GetCreationTimestamp() metav1.Time { var timestamp metav1.Time timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) @@ -401,31 +431,6 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } -func (u *Unstructured) GetInitializers() *metav1.Initializers { - m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers") - if !found || err != nil { - return nil - } - out := &metav1.Initializers{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) - return nil - } - return out -} - -func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { - if initializers == nil { - RemoveNestedField(u.Object, "metadata", "initializers") - return - } - out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) - } - u.setNestedField(out, "metadata", "initializers") -} - func (u *Unstructured) GetFinalizers() []string { val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers") return val @@ -436,7 +441,7 @@ func (u *Unstructured) SetFinalizers(finalizers []string) { RemoveNestedField(u.Object, "metadata", "finalizers") return } - u.setNestedSlice(finalizers, "metadata", "finalizers") + u.setNestedStringSlice(finalizers, "metadata", "finalizers") } func (u *Unstructured) GetClusterName() string { @@ -450,3 +455,42 @@ func (u *Unstructured) SetClusterName(clusterName string) { } u.setNestedField(clusterName, "metadata", "clusterName") } + +func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { + items, found, err := NestedSlice(u.Object, "metadata", "managedFields") + if !found || err != nil { + return nil + } + managedFields := []metav1.ManagedFieldsEntry{} + for _, item := range items { + m, ok := item.(map[string]interface{}) + if !ok { + utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item)) + return nil + } + out := metav1.ManagedFieldsEntry{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err)) + return nil + } + managedFields = append(managedFields, out) + } + return 
managedFields +} + +func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) { + if managedFields == nil { + RemoveNestedField(u.Object, "metadata", "managedFields") + return + } + items := []interface{}{} + for _, managedFieldsEntry := range managedFields { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err)) + return + } + items = append(items, out) + } + u.setNestedSlice(items, "metadata", "managedFields") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index bf3fd023..5028f5fb 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,6 +52,16 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } +// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. +// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. +func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { + out := new(UnstructuredList) + if u != nil { + out.SetGroupVersionKind(u.GroupVersionKind()) + } + return out +} + // UnstructuredContent returns a map contain an overlay of the Items field onto // the Object field. Items always overwrites overlay. func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { @@ -166,6 +176,18 @@ func (u *UnstructuredList) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } +func (u *UnstructuredList) GetRemainingItemCount() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") +} + +func (u *UnstructuredList) SetRemainingItemCount(c *int64) { + if c == nil { + RemoveNestedField(u.Object, "metadata", "remainingItemCount") + } else { + u.setNestedField(*c, "metadata", "remainingItemCount") + } +} + func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { u.SetAPIVersion(gvk.GroupVersion().String()) u.SetKind(gvk.Kind) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go new file mode 100644 index 00000000..2ade69dd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -0,0 +1,523 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. 
+ +package v1 + +import ( + url "net/url" + unsafe "unsafe" + + resource "k8s.io/apimachinery/pkg/api/resource" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + watch "k8s.io/apimachinery/pkg/watch" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*CreateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_CreateOptions(a.(*url.Values), b.(*CreateOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ExportOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ExportOptions(a.(*url.Values), b.(*ExportOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*GetOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_GetOptions(a.(*url.Values), b.(*GetOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ListOptions(a.(*url.Values), b.(*ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*PatchOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PatchOptions(a.(*url.Values), b.(*PatchOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*TableOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_TableOptions(a.(*url.Values), b.(*TableOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*UpdateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_UpdateOptions(a.(*url.Values), b.(*UpdateOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*map[string]string)(nil), (*LabelSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Map_string_To_string_To_v1_LabelSelector(a.(*map[string]string), b.(*LabelSelector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**bool)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_bool_To_bool(a.(**bool), b.(*bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**float64)(nil), (*float64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_float64_To_float64(a.(**float64), b.(*float64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int32)(nil), (*int32)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_Pointer_int32_To_int32(a.(**int32), b.(*int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int64)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int64_To_int(a.(**int64), b.(*int), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int64)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int64_To_int64(a.(**int64), b.(*int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(a.(**intstr.IntOrString), b.(*intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_string_To_string(a.(**string), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**Duration)(nil), (*Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_v1_Duration_To_v1_Duration(a.(**Duration), b.(*Duration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (**DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_v1_DeletionPropagation(a.(*[]string), b.(**DeletionPropagation), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (**Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_v1_Time(a.(*[]string), b.(**Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*[]int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Slice_int32(a.(*[]string), b.(*[]int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*IncludeObjectPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_v1_IncludeObjectPolicy(a.(*[]string), b.(*IncludeObjectPolicy), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_v1_Time(a.(*[]string), b.(*Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*bool)(nil), (**bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_bool_To_Pointer_bool(a.(*bool), b.(**bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*fields.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_fields_Selector_To_string(a.(*fields.Selector), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*float64)(nil), (**float64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_float64_To_Pointer_float64(a.(*float64), b.(**float64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int32)(nil), (**int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int32_To_Pointer_int32(a.(*int32), b.(**int32), scope) + }); 
err != nil { + return err + } + if err := s.AddConversionFunc((*int64)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int64_To_Pointer_int64(a.(*int64), b.(**int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int_To_Pointer_int64(a.(*int), b.(**int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (**intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(a.(*intstr.IntOrString), b.(**intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_intstr_IntOrString_To_intstr_IntOrString(a.(*intstr.IntOrString), b.(*intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*labels.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_labels_Selector_To_string(a.(*labels.Selector), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*resource.Quantity)(nil), (*resource.Quantity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_Quantity_To_resource_Quantity(a.(*resource.Quantity), b.(*resource.Quantity), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_Pointer_string(a.(*string), b.(**string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (*fields.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_fields_Selector(a.(*string), b.(*fields.Selector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (*labels.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_labels_Selector(a.(*string), b.(*labels.Selector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*DeleteOptions)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeleteOptions_To_v1_DeleteOptions(a.(*DeleteOptions), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Duration)(nil), (**Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Duration_To_Pointer_v1_Duration(a.(*Duration), b.(**Duration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*InternalEvent)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_InternalEvent_To_v1_WatchEvent(a.(*InternalEvent), b.(*WatchEvent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*LabelSelector)(nil), (*map[string]string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_LabelSelector_To_Map_string_To_string(a.(*LabelSelector), b.(*map[string]string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ListMeta)(nil), (*ListMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ListMeta_To_v1_ListMeta(a.(*ListMeta), b.(*ListMeta), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*MicroTime)(nil), (*MicroTime)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_MicroTime_To_v1_MicroTime(a.(*MicroTime), b.(*MicroTime), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Time)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Time_To_v1_Time(a.(*Time), b.(*Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*TypeMeta)(nil), (*TypeMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TypeMeta_To_v1_TypeMeta(a.(*TypeMeta), b.(*TypeMeta), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*WatchEvent)(nil), (*InternalEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WatchEvent_To_v1_InternalEvent(a.(*WatchEvent), b.(*InternalEvent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*WatchEvent)(nil), (*watch.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WatchEvent_To_watch_Event(a.(*WatchEvent), b.(*watch.Event), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*watch.Event)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_watch_Event_To_v1_WatchEvent(a.(*watch.Event), b.(*WatchEvent), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_CreateOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_CreateOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["gracePeriodSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.GracePeriodSeconds, s); err != nil { + return err + } + } else { + out.GracePeriodSeconds = nil + } + // INFO: in.Preconditions opted out of conversion generation + if values, ok := map[string][]string(*in)["orphanDependents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.OrphanDependents, s); err != nil { + return err + } + } else { + out.OrphanDependents = nil + } + if values, ok := map[string][]string(*in)["propagationPolicy"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out.PropagationPolicy, s); err != nil { + return err + } + } else { + out.PropagationPolicy = nil + } + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + return nil +} + +func autoConvert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["export"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Export, s); err != nil { + return err + } + } else { + out.Export = false + } + if values, ok := map[string][]string(*in)["exact"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Exact, s); err != nil { + return err + } + } else { + out.Exact = false + } + return nil +} + +// Convert_url_Values_To_v1_ExportOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ExportOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { + return err + } + } else { + out.ResourceVersion = "" + } + return nil +} + +// Convert_url_Values_To_v1_GetOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_GetOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["labelSelector"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.LabelSelector, s); err != nil { + return err + } + } else { + out.LabelSelector = "" + } + if values, ok := map[string][]string(*in)["fieldSelector"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldSelector, s); err != nil { + return err + } + } else { + out.FieldSelector = "" + } + if values, ok := map[string][]string(*in)["watch"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Watch, s); err != nil { + return err + } + } else { + out.Watch = false + } + if values, ok := map[string][]string(*in)["allowWatchBookmarks"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.AllowWatchBookmarks, s); err != nil { + return err + } + } else { + out.AllowWatchBookmarks = false + } + if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { + return err + } + } else { + out.ResourceVersion = "" + } + if values, ok := map[string][]string(*in)["timeoutSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TimeoutSeconds, s); err != nil { + return err + } + } else { + out.TimeoutSeconds = nil + } + if values, ok := map[string][]string(*in)["limit"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_int64(&values, &out.Limit, s); err != nil { + return err + } + } else { + out.Limit = 0 + } + if values, ok := map[string][]string(*in)["continue"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Continue, s); err != nil { + return err + } + } else { + out.Continue = "" + } + return nil +} + +// Convert_url_Values_To_v1_ListOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ListOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["force"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.Force, s); err != nil { + return err + } + } else { + out.Force = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_PatchOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PatchOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["-"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.NoHeaders, s); err != nil { + return err + } + } else { + out.NoHeaders = false + } + if values, ok := map[string][]string(*in)["includeObject"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_v1_IncludeObjectPolicy(&values, &out.IncludeObject, s); err != nil { + return err + } + } else { + out.IncludeObject = "" + } + return nil +} + +// Convert_url_Values_To_v1_TableOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_TableOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_UpdateOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_UpdateOptions(in, out, s) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 10845993..b82fdf20 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -312,6 +312,27 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsV1. +func (in *FieldsV1) DeepCopy() *FieldsV1 { + if in == nil { + return nil + } + out := new(FieldsV1) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GetOptions) DeepCopyInto(out *GetOptions) { *out = *in @@ -433,48 +454,6 @@ func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. -func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Initializers) DeepCopyInto(out *Initializers) { - *out = *in - if in.Pending != nil { - in, out := &in.Pending, &out.Pending - *out = make([]Initializer, len(*in)) - copy(*out, *in) - } - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(Status) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. -func (in *Initializers) DeepCopy() *Initializers { - if in == nil { - return nil - } - out := new(Initializers) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { *out = *in @@ -549,7 +528,7 @@ func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -581,6 +560,11 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListMeta) DeepCopyInto(out *ListMeta) { *out = *in + if in.RemainingItemCount != nil { + in, out := &in.RemainingItemCount, &out.RemainingItemCount + *out = new(int64) + **out = **in + } return } @@ -624,6 +608,31 @@ func (in *ListOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = (*in).DeepCopy() + } + if in.FieldsV1 != nil { + in, out := &in.FieldsV1, &out.FieldsV1 + *out = new(FieldsV1) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldsEntry. +func (in *ManagedFieldsEntry) DeepCopy() *ManagedFieldsEntry { + if in == nil { + return nil + } + out := new(ManagedFieldsEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime. func (in *MicroTime) DeepCopy() *MicroTime { if in == nil { @@ -668,16 +677,18 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = new(Initializers) - (*in).DeepCopyInto(*out) - } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]string, len(*in)) copy(*out, *in) } + if in.ManagedFields != nil { + in, out := &in.ManagedFields, &out.ManagedFields + *out = make([]ManagedFieldsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -717,6 +728,65 @@ func (in *OwnerReference) DeepCopy() *OwnerReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. 
+func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { + if in == nil { + return nil + } + out := new(PartialObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PartialObjectMetadata, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Patch) DeepCopyInto(out *Patch) { *out = *in @@ -733,6 +803,41 @@ func (in *Patch) DeepCopy() *Patch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchOptions) DeepCopyInto(out *PatchOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Force != nil { + in, out := &in.Force, &out.Force + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchOptions. +func (in *PatchOptions) DeepCopy() *PatchOptions { + if in == nil { + return nil + } + out := new(PatchOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PatchOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = *in @@ -741,6 +846,11 @@ func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = new(types.UID) **out = **in } + if in.ResourceVersion != nil { + in, out := &in.ResourceVersion, &out.ResourceVersion + *out = new(string) + **out = **in + } return } @@ -795,7 +905,7 @@ func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { func (in *Status) DeepCopyInto(out *Status) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Details != nil { in, out := &in.Details, &out.Details *out = new(StatusDetails) @@ -859,6 +969,108 @@ func (in *StatusDetails) DeepCopy() *StatusDetails { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. +func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { + if in == nil { + return nil + } + out := new(TableColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableOptions) DeepCopyInto(out *TableOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. +func (in *TableOptions) DeepCopy() *TableOptions { + if in == nil { + return nil + } + out := new(TableOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRow) DeepCopyInto(out *TableRow) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. +func (in *TableRowCondition) DeepCopy() *TableRowCondition { + if in == nil { + return nil + } + out := new(TableRowCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. func (in *Time) DeepCopy() *Time { if in == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go index 3b2bedd9..2b7e8ca0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -15,30 +15,3 @@ limitations under the License. */ package v1beta1 - -import "k8s.io/apimachinery/pkg/runtime" - -func (in *TableRow) DeepCopy() *TableRow { - if in == nil { - return nil - } - - out := new(TableRow) - - if in.Cells != nil { - out.Cells = make([]interface{}, len(in.Cells)) - for i := range in.Cells { - out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) - } - } - - if in.Conditions != nil { - out.Conditions = make([]TableRowCondition, len(in.Conditions)) - for i := range in.Conditions { - in.Conditions[i].DeepCopyInto(&out.Conditions[i]) - } - } - - in.Object.DeepCopyInto(&out.Object) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go index dc461cc2..20c9d2ec 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io -package v1beta1 + +package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index fe3df691..5fae30ae 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -14,31 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto -// DO NOT EDIT! -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto - - It has these top-level messages: - PartialObjectMetadata - PartialObjectMetadataList - TableOptions -*/ package v1beta1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" -import io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -51,55 +44,71 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } -func (*PartialObjectMetadata) ProtoMessage() {} -func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_90ec10f86b91f9a8, []int{0} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) } -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func init() { - proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadata") proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") - proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.TableOptions") } -func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) } -func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil +var fileDescriptor_90ec10f86b91f9a8 = []byte{ + // 321 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, + 0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, + 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6, + 0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e, + 0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x79, 0xfe, 0xe1, 0xf7, 0xcb, 0x3f, 0x89, 0x3f, 0x8e, + 0x4f, 0x2c, 0x93, 0x9a, 0xc7, 0x59, 0x00, 0x26, 0x01, 0x04, 0xcb, 0x67, 0x90, 0x4c, 0xb4, 0xe1, + 0xe5, 0x86, 0x48, 0xa5, 0x12, 0xe1, 0x54, 0x26, 0x60, 0x1e, 0x79, 0x1a, 0x47, 0x2e, 0xb0, 0x5c, + 0x01, 0x0a, 0x3e, 0x1b, 0x04, 0x80, 0x62, 0xc0, 0x23, 0x48, 0xc0, 0x08, 0x84, 0x09, 0x4b, 0x8d, + 0x46, 0xdd, 0x3c, 0xdc, 0xa0, 0xec, 0x2b, 0xca, 0xd2, 0x38, 0x72, 0x81, 0x65, 0x0e, 0x65, 0x25, + 0xda, 0xee, 0x47, 0x12, 
0xa7, 0x59, 0xc0, 0x42, 0xad, 0x78, 0xa4, 0x23, 0xcd, 0x0b, 0x43, 0x90, + 0xdd, 0x15, 0x53, 0x31, 0x14, 0xab, 0x8d, 0xb9, 0x7d, 0x54, 0xa5, 0xd4, 0x76, 0x9f, 0xf6, 0xaf, + 0x57, 0x31, 0x59, 0x82, 0x52, 0xc1, 0x37, 0xe0, 0xf8, 0x2f, 0xc0, 0x86, 0x53, 0x50, 0x62, 0x9b, + 0x3b, 0x78, 0x21, 0xfe, 0xfe, 0x95, 0x30, 0x28, 0xc5, 0xc3, 0x65, 0x70, 0x0f, 0x21, 0x5e, 0x00, + 0x8a, 0x89, 0x40, 0x71, 0x2e, 0x2d, 0x36, 0x6f, 0xfc, 0xba, 0x2a, 0xe7, 0xd6, 0xbf, 0x2e, 0xe9, + 0x35, 0x86, 0x8c, 0x55, 0x79, 0x29, 0xe6, 0x68, 0x67, 0x1a, 0xed, 0xcd, 0x57, 0x1d, 0x2f, 0x5f, + 0x75, 0xea, 0x1f, 0xc9, 0xf8, 0xd3, 0xd8, 0xbc, 0xf5, 0x6b, 0x12, 0x41, 0xd9, 0x16, 0xe9, 0xfe, + 0xef, 0x35, 0x86, 0xa7, 0xd5, 0xd4, 0x3f, 0xb6, 0x1d, 0xed, 0x96, 0xe7, 0xd4, 0xce, 0x9c, 0x71, + 0xbc, 0x11, 0x8f, 0xfa, 0xf3, 0x35, 0xf5, 0x16, 0x6b, 0xea, 0x2d, 0xd7, 0xd4, 0x7b, 0xca, 0x29, + 0x99, 0xe7, 0x94, 0x2c, 0x72, 0x4a, 0x96, 0x39, 0x25, 0xaf, 0x39, 0x25, 0xcf, 0x6f, 0xd4, 0xbb, + 0xde, 0x29, 0xbf, 0xf6, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x7e, 0x00, 0x08, 0x5a, 0x02, 0x00, + 0x00, } func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -107,83 +116,57 @@ func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { } func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil -} - -func (m *TableOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) - i += copy(dAtA[i:], m.IncludeObject) - return i, nil + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 
0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PartialObjectMetadata) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return base } - func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Items) > 0 { @@ -192,56 +175,29 @@ func (m *PartialObjectMetadataList) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - return n -} - -func (m *TableOptions) Size() (n int) { - var l int - _ = l - l = len(m.IncludeObject) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) return n } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *PartialObjectMetadata) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadata{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *PartialObjectMetadataList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PartialObjectMetadataList{`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TableOptions) String() string { - if this == nil { - return "nil" + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," } - s := strings.Join([]string{`&TableOptions{`, - `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -254,7 +210,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -269,7 +225,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -277,15 +233,15 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -297,7 +253,7 @@ func (m 
*PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -306,66 +262,20 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, v1.PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -377,7 +287,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -386,11 +296,13 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &PartialObjectMetadata{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -403,83 +315,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TableOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -548,10 +384,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -580,6 +419,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -598,35 +440,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 375 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcd, 0x0a, 0xd3, 0x40, - 0x10, 0xc7, 0xb3, 0x48, 0xd1, 0x6e, 0xed, 0x25, 0x22, 0xd4, 0x1e, 0x36, 0xa5, 0xa7, 0x0a, 0x76, - 0xd7, 0x16, 0x11, 0x8f, 0x92, 0x5b, 0x41, 0x69, 0x09, 0x9e, 0x3c, 0xb9, 0x49, 0xc6, 0x74, 0xcd, - 0xc7, 0x86, 0xec, 0xa6, 0xd0, 0x8b, 0xf8, 0x08, 0x3e, 0x56, 0x8f, 0x3d, 0xf6, 0x14, 0x6c, 0x7c, - 0x0b, 0x4f, 0x92, 0x0f, 0xec, 0x87, 0x15, 0x7b, 0x9b, 0xf9, 0x0f, 0xbf, 0x5f, 0x66, 0xb2, 0xd8, - 0x09, 0xdf, 0x28, 0x2a, 0x24, 0x0b, 0x73, 0x17, 0xb2, 0x04, 0x34, 0x28, 0xb6, 0x81, 0xc4, 0x97, - 0x19, 0x6b, 0x07, 0x3c, 0x15, 0x31, 0xf7, 0xd6, 0x22, 0x81, 0x6c, 0xcb, 0xd2, 0x30, 0xa8, 0x02, - 0xc5, 0x62, 0xd0, 0x9c, 0x6d, 0x66, 0x2e, 0x68, 0x3e, 0x63, 0x01, 0x24, 0x90, 0x71, 0x0d, 0x3e, - 0x4d, 0x33, 0xa9, 0xa5, 0xf9, 0xbc, 0x41, 0xe9, 0x39, 0x4a, 0xd3, 0x30, 0xa8, 0x02, 0x45, 0x2b, - 0x94, 0xb6, 0xe8, 0x70, 0x1a, 0x08, 0xbd, 0xce, 0x5d, 0xea, 0xc9, 0x98, 0x05, 0x32, 0x90, 0xac, - 0x36, 0xb8, 0xf9, 0xe7, 0xba, 0xab, 0x9b, 0xba, 0x6a, 0xcc, 0xc3, 0x57, 0xf7, 0x2c, 0x75, 0xbd, - 0xcf, 0xf0, 0x9f, 0xa7, 0x64, 0x79, 0xa2, 0x45, 0x0c, 0x7f, 0x01, 0xaf, 0xff, 0x07, 0x28, 0x6f, - 0x0d, 0x31, 0xbf, 0xe6, 0xc6, 0x5b, 0xfc, 0x74, 0xc5, 0x33, 0x2d, 0x78, 0xb4, 0x74, 0xbf, 0x80, - 0xa7, 0xdf, 0x83, 0xe6, 0x3e, 0xd7, 0xdc, 0xfc, 0x84, 0x1f, 0xc5, 0x6d, 0x3d, 0x40, 0x23, 0x34, - 0xe9, 0xcd, 0x5f, 0xd2, 0x7b, 0x7e, 0x12, 0x3d, 0x79, 0x6c, 0x73, 0x57, 0x58, 0x46, 0x59, 0x58, - 0xf8, 0x94, 0x39, 0x7f, 0xac, 0xe3, 0xaf, 0xf8, 0xd9, 0xcd, 0x4f, 0xbf, 0x13, 0x4a, 0x9b, 0x1c, - 0x77, 0x84, 0x86, 0x58, 0x0d, 0xd0, 0xe8, 0xc1, 0xa4, 0x37, 0x7f, 0x4b, 0xef, 0x7e, 0x20, 0x7a, - 0x53, 0x6a, 
0x77, 0xcb, 0xc2, 0xea, 0x2c, 0x2a, 0xa5, 0xd3, 0x98, 0xc7, 0x2e, 0x7e, 0xfc, 0x81, - 0xbb, 0x11, 0x2c, 0x53, 0x2d, 0x64, 0xa2, 0x4c, 0x07, 0xf7, 0x45, 0xe2, 0x45, 0xb9, 0x0f, 0x0d, - 0x5a, 0x9f, 0xdd, 0xb5, 0x5f, 0xb4, 0x47, 0xf4, 0x17, 0xe7, 0xc3, 0x5f, 0x85, 0xf5, 0xe4, 0x22, - 0x58, 0xc9, 0x48, 0x78, 0x5b, 0xe7, 0x52, 0x61, 0x4f, 0x77, 0x47, 0x62, 0xec, 0x8f, 0xc4, 0x38, - 0x1c, 0x89, 0xf1, 0xad, 0x24, 0x68, 0x57, 0x12, 0xb4, 0x2f, 0x09, 0x3a, 0x94, 0x04, 0xfd, 0x28, - 0x09, 0xfa, 0xfe, 0x93, 0x18, 0x1f, 0x1f, 0xb6, 0xab, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xf3, - 0xe1, 0xde, 0x86, 0xdb, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 83be9979..19606666 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -28,30 +28,15 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. +// PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadata { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; -} + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; -// PartialObjectMetadataList contains a list of objects containing only their metadata -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadataList { // items contains each of the included items. - repeated PartialObjectMetadata items = 1; -} - -// TableOptions are used when a Table is requested by the caller. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message TableOptions { - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - optional string includeObject = 1; + repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go index d13254b4..4b4acd72 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -19,6 +19,7 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // GroupName is the group name for this API. 
@@ -38,7 +39,8 @@ var scheme = runtime.NewScheme() // ParameterCodec knows about query parameters used with the meta v1beta1 API spec. var ParameterCodec = runtime.NewParameterCodec(scheme) -func init() { +// AddMetaToScheme registers base meta types into schemas. +func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Table{}, &TableOptions{}, @@ -46,12 +48,14 @@ func init() { &PartialObjectMetadataList{}, ) - if err := scheme.AddConversionFuncs( + return scheme.AddConversionFuncs( Convert_Slice_string_To_v1beta1_IncludeObjectPolicy, - ); err != nil { - panic(err) - } + ) +} + +func init() { + utilruntime.Must(AddMetaToScheme(scheme)) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) + utilruntime.Must(RegisterDefaults(scheme)) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go index 344c533e..f16170a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -18,144 +18,67 @@ limitations under the License. package v1beta1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf -// generation to support a meta type that can accept any valid JSON. - // Table is a tabular representation of a set of API resources. The server transforms the // object into a set of preferred columns for quickly reviewing the objects. -// +protobuf=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type Table struct { - v1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - v1.ListMeta `json:"metadata,omitempty"` - - // columnDefinitions describes each column in the returned items array. The number of cells per row - // will always match the number of column definitions. - ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` - // rows is the list of items in the table. - Rows []TableRow `json:"rows"` -} +// +protobuf=false +type Table = v1.Table // TableColumnDefinition contains information about a column returned in the Table. // +protobuf=false -type TableColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name"` - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Type string `json:"type"` - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Format string `json:"format"` - // description is a human readable description of this column. - Description string `json:"description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. 
- Priority int32 `json:"priority"` -} +type TableColumnDefinition = v1.TableColumnDefinition // TableRow is an individual row in a table. // +protobuf=false -type TableRow struct { - // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple - // maps, or lists, or null. See the type field of the column definition for a more detailed description. - Cells []interface{} `json:"cells"` - // conditions describe additional status of a row that are relevant for a human user. - // +optional - Conditions []TableRowCondition `json:"conditions,omitempty"` - // This field contains the requested additional information about each object based on the includeObject - // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the - // default serialization of the object for the current API version, and if "Metadata" (the default) will - // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. - // +optional - Object runtime.RawExtension `json:"object,omitempty"` -} +type TableRow = v1.TableRow // TableRowCondition allows a row to be marked with additional information. // +protobuf=false -type TableRowCondition struct { - // Type of row condition. - Type RowConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` - // (brief) machine readable reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} +type TableRowCondition = v1.TableRowCondition -type RowConditionType string +type RowConditionType = v1.RowConditionType -// These are valid conditions of a row. This list is not exhaustive and new conditions may be -// included by other resources. -const ( - // RowCompleted means the underlying resource has reached completion and may be given less - // visual priority than other resources. - RowCompleted RowConditionType = "Completed" -) +type ConditionStatus = v1.ConditionStatus -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// IncludeObjectPolicy controls which portion of the object is returned with a Table. -type IncludeObjectPolicy string - -const ( - // IncludeNone returns no object. - IncludeNone IncludeObjectPolicy = "None" - // IncludeMetadata serializes the object containing only its metadata field. - IncludeMetadata IncludeObjectPolicy = "Metadata" - // IncludeObject contains the full object. - IncludeObject IncludeObjectPolicy = "Object" -) +type IncludeObjectPolicy = v1.IncludeObjectPolicy // TableOptions are used when a Table is requested by the caller. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type TableOptions struct { - v1.TypeMeta `json:",inline"` - // includeObject decides whether to include each object along with its columnar information. 
- // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` -} +type TableOptions = v1.TableOptions // PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients // to get access to a particular ObjectMeta schema without knowing the details of the version. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadata struct { - v1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` -} +type PartialObjectMetadata = v1.PartialObjectMetadata + +// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than +// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must +// remain independent of v1.PartialObjectMetadataList to preserve mappings. -// PartialObjectMetadataList contains a list of objects containing only their metadata +// PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type PartialObjectMetadataList struct { v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` // items contains each of the included items. - Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` + Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` } + +const ( + RowCompleted = v1.RowCompleted + + ConditionTrue = v1.ConditionTrue + ConditionFalse = v1.ConditionFalse + ConditionUnknown = v1.ConditionUnknown + + IncludeNone = v1.IncludeNone + IncludeMetadata = v1.IncludeMetadata + IncludeObject = v1.IncludeObject +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index 7394535d..ef7e7c1e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -27,78 +27,14 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PartialObjectMetadata = map[string]string{ - "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", -} - -func (PartialObjectMetadata) SwaggerDoc() map[string]string { - return map_PartialObjectMetadata -} - var map_PartialObjectMetadataList = map[string]string{ - "": "PartialObjectMetadataList contains a list of objects containing only their metadata", - "items": "items contains each of the included items.", + "": "PartialObjectMetadataList contains a list of objects containing only their metadata.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", } func (PartialObjectMetadataList) SwaggerDoc() map[string]string { return map_PartialObjectMetadataList } -var map_Table = map[string]string{ - "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", - "rows": "rows is the list of items in the table.", -} - -func (Table) SwaggerDoc() map[string]string { - return map_Table -} - -var map_TableColumnDefinition = map[string]string{ - "": "TableColumnDefinition contains information about a column returned in the Table.", - "name": "name is a human readable name for the column.", - "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "description": "description is a human readable description of this column.", - "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", -} - -func (TableColumnDefinition) SwaggerDoc() map[string]string { - return map_TableColumnDefinition -} - -var map_TableOptions = map[string]string{ - "": "TableOptions are used when a Table is requested by the caller.", - "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", -} - -func (TableOptions) SwaggerDoc() map[string]string { - return map_TableOptions -} - -var map_TableRow = map[string]string{ - "": "TableRow is an individual row in a table.", - "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. 
See the type field of the column definition for a more detailed description.", - "conditions": "conditions describe additional status of a row that are relevant for a human user.", - "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.", -} - -func (TableRow) SwaggerDoc() map[string]string { - return map_TableRow -} - -var map_TableRowCondition = map[string]string{ - "": "TableRowCondition allows a row to be marked with additional information.", - "type": "Type of row condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "reason": "(brief) machine readable reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (TableRowCondition) SwaggerDoc() map[string]string { - return map_TableRowCondition -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index b77db1b1..89053b98 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -21,48 +21,20 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. -func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { - if in == nil { - return nil - } - out := new(PartialObjectMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { *out = *in out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]*PartialObjectMetadata, len(*in)) + *out = make([]v1.PartialObjectMetadata, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PartialObjectMetadata) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } return @@ -85,105 +57,3 @@ func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Table) DeepCopyInto(out *Table) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.ColumnDefinitions != nil { - in, out := &in.ColumnDefinitions, &out.ColumnDefinitions - *out = make([]TableColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Rows != nil { - in, out := &in.Rows, &out.Rows - *out = make([]TableRow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. -func (in *Table) DeepCopy() *Table { - if in == nil { - return nil - } - out := new(Table) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Table) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. -func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { - if in == nil { - return nil - } - out := new(TableColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableOptions) DeepCopyInto(out *TableOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. -func (in *TableOptions) DeepCopy() *TableOptions { - if in == nil { - return nil - } - out := new(TableOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TableOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRow) DeepCopyInto(out *TableRow) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. 
-func (in *TableRowCondition) DeepCopy() *TableRowCondition { - if in == nil { - return nil - } - out := new(TableRowCondition) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index b3804aa4..2f0dd007 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -54,10 +54,6 @@ func jsonTag(field reflect.StructField) (string, bool) { return tag, omitempty } -func formatValue(value interface{}) string { - return fmt.Sprintf("%v", value) -} - func isPointerKind(kind reflect.Kind) bool { return kind == reflect.Ptr } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 32db4d96..abf3ace6 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -172,7 +172,7 @@ func ConvertSelectorToLabelsMap(selector string) (Set, error) { return labelsMap, err } value := strings.TrimSpace(l[1]) - if err := validateLabelValue(value); err != nil { + if err := validateLabelValue(key, value); err != nil { return labelsMap, err } labelsMap[key] = value diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 374d2ef1..2f8e1e2b 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -23,10 +23,10 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" ) // Requirements is AND of all requirements. @@ -54,6 +54,11 @@ type Selector interface { // Make a deep copy of the selector. DeepCopySelector() Selector + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific label to be set, and if so returns the value it + // requires. + RequiresExactMatch(label string) (value string, found bool) } // Everything returns a selector that matches all labels. 
@@ -63,12 +68,13 @@ func Everything() Selector { type nothingSelector struct{} -func (n nothingSelector) Matches(_ Labels) bool { return false } -func (n nothingSelector) Empty() bool { return false } -func (n nothingSelector) String() string { return "" } -func (n nothingSelector) Add(_ ...Requirement) Selector { return n } -func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } -func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { return "", false } // Nothing returns a selector that matches no labels func Nothing() Selector { @@ -162,7 +168,7 @@ func NewRequirement(key string, op selection.Operator, vals []string) (*Requirem } for i := range vals { - if err := validateLabelValue(vals[i]); err != nil { + if err := validateLabelValue(key, vals[i]); err != nil { return nil, err } } @@ -211,13 +217,13 @@ func (r *Requirement) Matches(ls Labels) bool { } lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) return false } // There should be only one strValue in r.strValues, and can be converted to a integer. if len(r.strValues) != 1 { - glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) return false } @@ -225,7 +231,7 @@ func (r *Requirement) Matches(ls Labels) bool { for i := range r.strValues { rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) return false } } @@ -358,6 +364,23 @@ func (lsel internalSelector) String() string { return strings.Join(reqs, ",") } +// RequiresExactMatch introspect whether a given selector requires a single specific field +// to be set, and if so returns the value it requires. 
+func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range lsel { + if lsel[ix].key == label { + switch lsel[ix].operator { + case selection.Equals, selection.DoubleEquals, selection.In: + if len(lsel[ix].strValues) == 1 { + return lsel[ix].strValues[0], true + } + } + return "", false + } + } + return "", false +} + // Token represents constant definition for lexer token type Token int @@ -837,9 +860,9 @@ func validateLabelKey(k string) error { return nil } -func validateLabelValue(v string) error { +func validateLabelValue(k, v string) error { if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; ")) } return nil } @@ -850,7 +873,7 @@ func SelectorFromSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} } - var requirements internalSelector + requirements := make([]Requirement, 0, len(ls)) for label, value := range ls { r, err := NewRequirement(label, selection.Equals, []string{value}) if err == nil { @@ -862,7 +885,7 @@ func SelectorFromSet(ls Set) Selector { } // sort to have deterministic string representation sort.Sort(ByKey(requirements)) - return requirements + return internalSelector(requirements) } // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. @@ -872,13 +895,13 @@ func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} } - var requirements internalSelector + requirements := make([]Requirement, 0, len(ls)) for label, value := range ls { requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) } // sort to have deterministic string representation sort.Sort(ByKey(requirements)) - return requirements + return internalSelector(requirements) } // ParseToRequirements takes a string representing a selector and returns a list of diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index 6b859b28..0bccf9dd 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -19,13 +19,17 @@ package runtime import ( "bytes" "encoding/base64" + "encoding/json" "fmt" "io" "net/url" "reflect" + "strconv" + "strings" "k8s.io/apimachinery/pkg/conversion/queryparams" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" ) // codec binds an encoder and decoder. @@ -100,10 +104,19 @@ type NoopEncoder struct { var _ Serializer = NoopEncoder{} +const noopEncoderIdentifier Identifier = "noop" + func (n NoopEncoder) Encode(obj Object, w io.Writer) error { + // There is no need to handle runtime.CacheableObject, as we don't + // process the obj at all. return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) } +// Identifier implements runtime.Encoder interface. +func (n NoopEncoder) Identifier() Identifier { + return noopEncoderIdentifier +} + // NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. 
type NoopDecoder struct { Encoder @@ -193,19 +206,51 @@ func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (u type base64Serializer struct { Encoder Decoder + + identifier Identifier } func NewBase64Serializer(e Encoder, d Decoder) Serializer { - return &base64Serializer{e, d} + return &base64Serializer{ + Encoder: e, + Decoder: d, + identifier: identifier(e), + } +} + +func identifier(e Encoder) Identifier { + result := map[string]string{ + "name": "base64", + } + if e != nil { + result["encoder"] = string(e.Identifier()) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for base64Serializer: %v", err) + } + return Identifier(identifier) } func (s base64Serializer) Encode(obj Object, stream io.Writer) error { + if co, ok := obj.(CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, stream) + } + return s.doEncode(obj, stream) +} + +func (s base64Serializer) doEncode(obj Object, stream io.Writer) error { e := base64.NewEncoder(base64.StdEncoding, stream) err := s.Encoder.Encode(obj, e) e.Close() return err } +// Identifier implements runtime.Encoder interface. +func (s base64Serializer) Identifier() Identifier { + return s.identifier +} + func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) n, err := base64.StdEncoding.Decode(out, data) @@ -238,6 +283,11 @@ var ( DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} ) +const ( + internalGroupVersionerIdentifier = "internal" + disabledGroupVersionerIdentifier = "disabled" +) + type internalGroupVersioner struct{} // KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. @@ -253,6 +303,11 @@ func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi return schema.GroupVersionKind{}, false } +// Identifier implements GroupVersioner interface. +func (internalGroupVersioner) Identifier() string { + return internalGroupVersionerIdentifier +} + type disabledGroupVersioner struct{} // KindForGroupVersionKinds returns false for any input. @@ -260,19 +315,9 @@ func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi return schema.GroupVersionKind{}, false } -// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. -type GroupVersioners []GroupVersioner - -// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred. -func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { - for _, gv := range gvs { - target, ok := gv.KindForGroupVersionKinds(kinds) - if !ok { - continue - } - return target, true - } - return schema.GroupVersionKind{}, false +// Identifier implements GroupVersioner interface. +func (disabledGroupVersioner) Identifier() string { + return disabledGroupVersionerIdentifier } // Assert that schema.GroupVersion and GroupVersions implement GroupVersioner @@ -283,6 +328,7 @@ var _ GroupVersioner = multiGroupVersioner{} type multiGroupVersioner struct { target schema.GroupVersion acceptedGroupKinds []schema.GroupKind + coerce bool } // NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. 
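The base64Serializer changes above illustrate the pattern encoders are now expected to follow: build a stable Identifier (here a small JSON map that includes the delegate encoder's identifier) and short-circuit through CacheableObject in Encode. A rough sketch of a third-party wrapper doing the same; the gzip wrapper and all of its names are hypothetical and not part of this patch:

package example

import (
	"compress/gzip"
	"encoding/json"
	"io"

	"k8s.io/apimachinery/pkg/runtime"
)

// gzipEncoder wraps another runtime.Encoder and compresses its output.
type gzipEncoder struct {
	delegate   runtime.Encoder
	identifier runtime.Identifier
}

func newGzipEncoder(delegate runtime.Encoder) *gzipEncoder {
	// Derive a stable identifier from this wrapper's name plus the delegate's
	// identifier, mirroring what identifier() does for base64Serializer.
	raw, err := json.Marshal(map[string]string{
		"name":    "gzip",
		"encoder": string(delegate.Identifier()),
	})
	if err != nil {
		panic(err) // static input, cannot fail
	}
	return &gzipEncoder{delegate: delegate, identifier: runtime.Identifier(raw)}
}

// Identifier implements runtime.Encoder.
func (g *gzipEncoder) Identifier() runtime.Identifier { return g.identifier }

// Encode lets CacheableObject implementations reuse a previously cached serialization.
func (g *gzipEncoder) Encode(obj runtime.Object, w io.Writer) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(g.Identifier(), g.doEncode, w)
	}
	return g.doEncode(obj, w)
}

func (g *gzipEncoder) doEncode(obj runtime.Object, w io.Writer) error {
	zw := gzip.NewWriter(w)
	if err := g.delegate.Encode(obj, zw); err != nil {
		zw.Close()
		return err
	}
	return zw.Close()
}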
@@ -294,6 +340,22 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} } +// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind. +// Incoming kinds that match the provided groupKinds are preferred. +// Kind may be empty in the provided group kind, in which case any kind will match. +// Examples: +// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) +func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true} +} + // KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will // use the originating kind where possible. func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { @@ -308,5 +370,27 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio return v.target.WithKind(src.Kind), true } } + if v.coerce && len(kinds) > 0 { + return v.target.WithKind(kinds[0].Kind), true + } return schema.GroupVersionKind{}, false } + +// Identifier implements GroupVersioner interface. 
+func (v multiGroupVersioner) Identifier() string { + groupKinds := make([]string, 0, len(v.acceptedGroupKinds)) + for _, gk := range v.acceptedGroupKinds { + groupKinds = append(groupKinds, gk.String()) + } + result := map[string]string{ + "name": "multi", + "target": v.target.String(), + "accepted": strings.Join(groupKinds, ","), + "coerce": strconv.FormatBool(v.coerce), + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling Identifier for %#v: %v", v, err) + } + return string(identifier) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go index 08d2abfe..0947dce7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -61,19 +61,21 @@ var DefaultStringConversions = []interface{}{ Convert_Slice_string_To_int64, } -func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { - if len(*input) == 0 { +func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error { + if len(*in) == 0 { *out = "" + return nil } - *out = (*input)[0] + *out = (*in)[0] return nil } -func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { - if len(*input) == 0 { +func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) error { + if len(*in) == 0 { *out = 0 + return nil } - str := (*input)[0] + str := (*in)[0] i, err := strconv.Atoi(str) if err != nil { return err @@ -83,15 +85,16 @@ func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) } // Convert_Slice_string_To_bool will convert a string parameter to boolean. -// Only the absence of a value, a value of "false", or a value of "0" resolve to false. +// Only the absence of a value (i.e. zero-length slice), a value of "false", or a +// value of "0" resolve to false. // Any other value (including empty string) resolves to true. -func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { - if len(*input) == 0 { +func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) error { + if len(*in) == 0 { *out = false return nil } - switch strings.ToLower((*input)[0]) { - case "false", "0": + switch { + case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): *out = false default: *out = true @@ -99,15 +102,79 @@ func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope return nil } -func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { - if len(*input) == 0 { +// Convert_Slice_string_To_bool will convert a string parameter to boolean. +// Only the absence of a value (i.e. zero-length slice), a value of "false", or a +// value of "0" resolve to false. +// Any other value (including empty string) resolves to true. 
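To make the truthiness rules above concrete, a short sketch of calling these query-parameter helpers directly (the pointer variants they document follow right after this example). conversion.Scope is unused by these helpers, so nil is passed:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Query parameters arrive as a []string; only the first element is considered.
	in := []string{"0"}
	var b bool
	_ = runtime.Convert_Slice_string_To_bool(&in, &b, nil)
	fmt.Println(b) // false: "0" and "false" are the only falsy spellings

	in = []string{""}
	_ = runtime.Convert_Slice_string_To_bool(&in, &b, nil)
	fmt.Println(b) // true: an empty string still counts as "present"

	in = []string{"42"}
	var n *int64
	_ = runtime.Convert_Slice_string_To_Pointer_int64(&in, &n, nil)
	fmt.Println(*n) // 42
}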
+func Convert_Slice_string_To_Pointer_bool(in *[]string, out **bool, s conversion.Scope) error { + if len(*in) == 0 { + boolVar := false + *out = &boolVar + return nil + } + switch { + case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): + boolVar := false + *out = &boolVar + default: + boolVar := true + *out = &boolVar + } + return nil +} + +func string_to_int64(in string) (int64, error) { + return strconv.ParseInt(in, 10, 64) +} + +func Convert_string_To_int64(in *string, out *int64, s conversion.Scope) error { + if in == nil { + *out = 0 + return nil + } + i, err := string_to_int64(*in) + if err != nil { + return err + } + *out = i + return nil +} + +func Convert_Slice_string_To_int64(in *[]string, out *int64, s conversion.Scope) error { + if len(*in) == 0 { *out = 0 + return nil } - str := (*input)[0] - i, err := strconv.ParseInt(str, 10, 64) + i, err := string_to_int64((*in)[0]) if err != nil { return err } *out = i return nil } + +func Convert_string_To_Pointer_int64(in *string, out **int64, s conversion.Scope) error { + if in == nil { + *out = nil + return nil + } + i, err := string_to_int64(*in) + if err != nil { + return err + } + *out = &i + return nil +} + +func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversion.Scope) error { + if len(*in) == 0 { + *out = nil + return nil + } + i, err := string_to_int64((*in)[0]) + if err != nil { + return err + } + *out = &i + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 291d7a4e..b3e8a53b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "github.com/golang/glog" + "k8s.io/klog" ) // UnstructuredConverter is an interface for converting between interface{} @@ -94,7 +94,7 @@ func parseBool(key string) bool { } value, err := strconv.ParseBool(key) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) + utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key)) } return value } @@ -133,10 +133,10 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i newObj := reflect.New(t.Elem()).Interface() newErr := fromUnstructuredViaJSON(u, newObj) if (err != nil) != (newErr != nil) { - glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } if err == nil && !c.comparison.DeepEqual(obj, newObj) { - glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -424,10 +424,10 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte newUnstr := map[string]interface{}{} newErr := toUnstructuredViaJSON(obj, &newUnstr) if (err != nil) != (newErr != nil) { - glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } if err == nil && !c.comparison.DeepEqual(u, newUnstr) { - glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { @@ 
-746,7 +746,7 @@ func isZero(v reflect.Value) bool { func structToUnstructured(sv, dv reflect.Value) error { st, dt := sv.Type(), dv.Type() if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv.Set(reflect.MakeMapWithSize(mapStringInterfaceType, st.NumField())) dv = dv.Elem() dt = dv.Type() } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index 322b0313..be0c5edc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -120,3 +120,32 @@ func IsMissingVersion(err error) bool { _, ok := err.(*missingVersionErr) return ok } + +// strictDecodingError is a base error type that is returned by a strict Decoder such +// as UniversalStrictDecoder. +type strictDecodingError struct { + message string + data string +} + +// NewStrictDecodingError creates a new strictDecodingError object. +func NewStrictDecodingError(message string, data string) error { + return &strictDecodingError{ + message: message, + data: data, + } +} + +func (e *strictDecodingError) Error() string { + return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) +} + +// IsStrictDecodingError returns true if the error indicates that the provided object +// strictness violations. +func IsStrictDecodingError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*strictDecodingError) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 967e0f53..af2f076b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -14,31 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto -// DO NOT EDIT! -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto - - It has these top-level messages: - RawExtension - TypeMeta - Unknown -*/ package runtime -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -51,27 +42,132 @@ var _ = math.Inf // proto package needs to be updated. 
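For the strict-decoding error type introduced in error.go above, a small sketch of how calling code might branch on it; the message and data strings here are invented:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// A strict decoder reports unknown or duplicate fields through this error type.
	err := runtime.NewStrictDecodingError(`unknown field "spec.replicaz"`, "mydata")
	fmt.Println(err) // strict decoder error for mydata: unknown field "spec.replicaz"

	// Callers can keep the decoded object and merely warn when strictness is the only problem.
	if runtime.IsStrictDecodingError(err) {
		fmt.Println("object decoded, but input violates strict mode")
	}
}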
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (*RawExtension) ProtoMessage() {} -func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{0} +} +func (m *RawExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RawExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawExtension.Merge(m, src) +} +func (m *RawExtension) XXX_Size() int { + return m.Size() +} +func (m *RawExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RawExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_RawExtension proto.InternalMessageInfo + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{1} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{2} +} +func (m *Unknown) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Unknown) XXX_Merge(src proto.Message) { + xxx_messageInfo_Unknown.Merge(m, src) +} +func (m *Unknown) XXX_Size() int { + return m.Size() +} +func (m *Unknown) XXX_DiscardUnknown() { + xxx_messageInfo_Unknown.DiscardUnknown(m) +} -func (m *Unknown) Reset() { *m = Unknown{} } -func (*Unknown) ProtoMessage() {} -func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_Unknown proto.InternalMessageInfo func init() { proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) +} + +var fileDescriptor_9d3c45d7f546725c = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 
0x13, 0x31, + 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, + 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, + 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, + 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, + 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, + 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, + 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, + 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, + 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, + 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, + 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, + 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, + 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, + 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, + 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, + 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, + 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, + 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, + 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, + 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, + 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, + 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, +} + func (m *RawExtension) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -79,23 +175,29 @@ func (m *RawExtension) Marshal() (dAtA []byte, err error) { } func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Raw != nil { - dAtA[i] = 0xa - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -103,25 +205,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int 
_ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Unknown) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -129,63 +238,60 @@ func (m *Unknown) Marshal() (dAtA []byte, err error) { } func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + i -= len(m.ContentEncoding) + copy(dAtA[i:], m.ContentEncoding) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i-- + dAtA[i] = 0x1a if m.Raw != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) - i += copy(dAtA[i:], m.ContentEncoding) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) - i += copy(dAtA[i:], m.ContentType) - return i, nil + { + size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *RawExtension) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Raw != nil { @@ -196,6 +302,9 @@ func (m *RawExtension) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIVersion) @@ -206,6 +315,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *Unknown) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.TypeMeta.Size() @@ -222,14 +334,7 @@ func (m *Unknown) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ 
- x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -291,7 +396,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -319,7 +424,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -328,6 +433,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -345,6 +453,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -372,7 +483,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -400,7 +511,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -410,6 +521,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -429,7 +543,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -439,6 +553,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -453,6 +570,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -480,7 +600,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -508,7 +628,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -517,6 +637,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -538,7 +661,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -547,6 +670,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -569,7 +695,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -579,6 +705,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -598,7 +727,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -608,6 +737,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -622,6 +754,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -688,10 +823,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -720,6 +858,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -738,35 +879,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 378 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, - 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, - 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, - 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, - 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, - 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, - 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, - 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, - 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, - 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, - 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, - 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, - 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, - 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, - 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, - 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, - 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 
0x61, 0xff, 0xe9, 0xce, 0x8e, - 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, - 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, - 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, - 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, - 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, - 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index fb61ac96..0e212ec9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -25,11 +25,11 @@ package k8s.io.apimachinery.pkg.runtime; option go_package = "runtime"; // RawExtension is used to hold extensions in external versions. -// +// // To use this, make a field which has RawExtension as its type in your external, versioned // struct, and Object in your internal struct. You also need to register your // various plugin types. -// +// // // Internal package: // type MyAPIObject struct { // runtime.TypeMeta `json:",inline"` @@ -38,7 +38,7 @@ option go_package = "runtime"; // type PluginA struct { // AOption string `json:"aOption"` // } -// +// // // External package: // type MyAPIObject struct { // runtime.TypeMeta `json:",inline"` @@ -47,7 +47,7 @@ option go_package = "runtime"; // type PluginA struct { // AOption string `json:"aOption"` // } -// +// // // On the wire, the JSON will look something like this: // { // "kind":"MyAPIObject", @@ -57,7 +57,7 @@ option go_package = "runtime"; // "aOption":"foo", // }, // } -// +// // So what happens? Decode first uses json or yaml to unmarshal the serialized data into // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. // The next step is to copy (using pkg/conversion) into the internal struct. The runtime @@ -65,13 +65,13 @@ option go_package = "runtime"; // JSON stored in RawExtension, turning it into the correct object type, and storing it // in the Object. (TODO: In the case where the object is of an unknown type, a // runtime.Unknown object will be created and stored.) -// +// // +k8s:deepcopy-gen=true // +protobuf=true // +k8s:openapi-gen=true message RawExtension { // Raw is the underlying serialization of this object. - // + // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. optional bytes raw = 1; } @@ -83,10 +83,10 @@ message RawExtension { // ... // other fields // } // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// +// // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. -// +// // +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true @@ -103,7 +103,7 @@ message TypeMeta { // TypeMeta features-- kind, version, etc. // TODO: Make this object have easy access to field based accessors and settors for // metadata and field mutatation. 
-// +// // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 33f11eb1..7bd1a3a6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -51,7 +51,7 @@ func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { func SetField(src interface{}, v reflect.Value, fieldName string) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } srcValue := reflect.ValueOf(src) if srcValue.Type().AssignableTo(field.Type()) { @@ -70,7 +70,7 @@ func SetField(src interface{}, v reflect.Value, fieldName string) error { func Field(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } destValue, err := conversion.EnforcePtr(dest) if err != nil { @@ -93,7 +93,7 @@ func Field(v reflect.Value, fieldName string, dest interface{}) error { func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) } v, err := conversion.EnforcePtr(dest) if err != nil { @@ -210,3 +210,50 @@ type defaultFramer struct{} func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } + +// WithVersionEncoder serializes an object and ensures the GVK is set. +type WithVersionEncoder struct { + Version GroupVersioner + Encoder + ObjectTyper +} + +// Encode does not do conversion. It sets the gvk during serialization. +func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { + gvks, _, err := e.ObjectTyper.ObjectKinds(obj) + if err != nil { + if IsNotRegisteredError(err) { + return e.Encoder.Encode(obj, stream) + } + return err + } + kind := obj.GetObjectKind() + oldGVK := kind.GroupVersionKind() + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) + err = e.Encoder.Encode(obj, stream) + kind.SetGroupVersionKind(oldGVK) + return err +} + +// WithoutVersionDecoder clears the group version kind of a deserialized object. +type WithoutVersionDecoder struct { + Decoder +} + +// Decode does not do conversion. It removes the gvk during deserialization. 
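The WithVersionEncoder added above (whose WithoutVersionDecoder counterpart is defined just below) stamps apiVersion/kind during Encode without performing conversion. A sketch of wrapping a plain JSON serializer this way; it assumes client-go's scheme package and k8s.io/api are available in the vendor tree, which is outside this diff:

package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// Wrap a plain JSON serializer so apiVersion/kind get set on the way out.
	enc := runtime.WithVersionEncoder{
		Version:     corev1.SchemeGroupVersion,
		Encoder:     json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme.Scheme, scheme.Scheme, json.SerializerOptions{}),
		ObjectTyper: scheme.Scheme,
	}

	pod := &corev1.Pod{}
	pod.Name = "demo"
	// Emits {"kind":"Pod","apiVersion":"v1", ...} even though the object's TypeMeta is empty.
	if err := enc.Encode(pod, os.Stdout); err != nil {
		panic(err)
	}
}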
+func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaults, into) + if obj != nil { + kind := obj.GetObjectKind() + // clearing the gvk is just a convention of a codec + kind.SetGroupVersionKind(schema.GroupVersionKind{}) + } + return obj, gvk, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 699ff13e..f44693c0 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -37,13 +37,36 @@ type GroupVersioner interface { // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) + // Identifier returns string representation of the object. + // Identifiers of two different encoders should be equal only if for every input + // kinds they return the same result. + Identifier() string } +// Identifier represents an identifier. +// Identitier of two different objects should be equal if and only if for every +// input the output they produce is exactly the same. +type Identifier string + // Encoder writes objects to a serialized form type Encoder interface { // Encode writes an object to a stream. Implementations may return errors if the versions are // incompatible, or if no conversion is defined. Encode(obj Object, w io.Writer) error + // Identifier returns an identifier of the encoder. + // Identifiers of two different encoders should be equal if and only if for every input + // object it will be encoded to the same representation by both of them. + // + // Identifier is inteted for use with CacheableObject#CacheEncode method. In order to + // correctly handle CacheableObject, Encode() method should look similar to below, where + // doEncode() is the encoding logic of implemented encoder: + // func (e *MyEncoder) Encode(obj Object, w io.Writer) error { + // if co, ok := obj.(CacheableObject); ok { + // return co.CacheEncode(e.Identifier(), e.doEncode, w) + // } + // return e.doEncode(obj, w) + // } + Identifier() Identifier } // Decoder attempts to load an object from data. @@ -91,6 +114,10 @@ type Framer interface { type SerializerInfo struct { // MediaType is the value that represents this serializer over the wire. MediaType string + // MediaTypeType is the first part of the MediaType ("application" in "application/json"). + MediaTypeType string + // MediaTypeSubType is the second part of the MediaType ("json" in "application/json"). + MediaTypeSubType string // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. EncodesAsText bool // Serializer is the individual object serializer for this media type. @@ -128,6 +155,28 @@ type NegotiatedSerializer interface { DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder } +// ClientNegotiator handles turning an HTTP content type into the appropriate encoder. +// Use NewClientNegotiator or NewVersionedClientNegotiator to create this interface from +// a NegotiatedSerializer. +type ClientNegotiator interface { + // Encoder returns the appropriate encoder for the provided contentType (e.g. application/json) + // and any optional mediaType parameters (e.g. pretty=1), or an error. 
If no serializer is found + // a NegotiateError will be returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + Encoder(contentType string, params map[string]string) (Encoder, error) + // Decoder returns the appropriate decoder for the provided contentType (e.g. application/json) + // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found + // a NegotiateError will be returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + Decoder(contentType string, params map[string]string) (Decoder, error) + // StreamDecoder returns the appropriate stream decoder for the provided contentType (e.g. + // application/json) and any optional mediaType parameters (e.g. pretty=1), or an error. If no + // serializer is found a NegotiateError will be returned. The Serializer and Framer will always + // be returned if a Decoder is returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) +} + // StorageSerializer is an interface used for obtaining encoders, decoders, and serializers // that can read and write data at rest. This would commonly be used by client tools that must // read files, or server side storage interfaces that persist restful objects. @@ -206,6 +255,25 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } +// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource +type EquivalentResourceMapper interface { + // EquivalentResourcesFor returns a list of resources that address the same underlying data as resource. + // If subresource is specified, only equivalent resources which also have the same subresource are included. + // The specified resource can be included in the returned list. + EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource + // KindFor returns the kind expected by the specified resource[/subresource]. + // A zero value is returned if the kind is unknown. + KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind +} + +// EquivalentResourceRegistry provides an EquivalentResourceMapper interface, +// and allows registering known resource[/subresource] -> kind +type EquivalentResourceRegistry interface { + EquivalentResourceMapper + // RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind. + RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) +} + // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -233,10 +301,34 @@ type Object interface { DeepCopyObject() Object } +// CacheableObject allows an object to cache its different serializations +// to avoid performing the same serialization multiple times. +type CacheableObject interface { + // CacheEncode writes an object to a stream. The function will + // be used in case of cache miss. The function takes ownership + // of the object. 
+ // If CacheableObject is a wrapper, then deep-copy of the wrapped object + // should be passed to function. + // CacheEncode assumes that for two different calls with the same , + // function will also be the same. + CacheEncode(id Identifier, encode func(Object, io.Writer) error, w io.Writer) error + // GetObject returns a deep-copy of an object to be encoded - the caller of + // GetObject() is the owner of returned object. The reason for making a copy + // is to avoid bugs, where caller modifies the object and forgets to copy it, + // thus modifying the object for everyone. + // The object returned by GetObject should be the same as the one that is supposed + // to be passed to function in CacheEncode method. + // If CacheableObject is a wrapper, the copy of wrapped object should be returned. + GetObject() Object +} + // Unstructured objects store values as map[string]interface{}, with only values that can be serialized // to JSON allowed. type Unstructured interface { Object + // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. + // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. + NewEmptyInstance() Unstructured // UnstructuredContent returns a non-nil map with this object's contents. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. SetUnstructuredContent should be used to mutate the contents. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go new file mode 100644 index 00000000..3ff84611 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type equivalentResourceRegistry struct { + // keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups). + // if null, or if "" is returned, resource.String() is used as the key + keyFunc func(resource schema.GroupResource) string + // resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources). + // main resources are stored with subresource="". + resources map[string]map[string][]schema.GroupVersionResource + // kinds maps resource -> subresource -> kind + kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind + // keys caches the computed key for each GroupResource + keys map[schema.GroupResource]string + + mutex sync.RWMutex +} + +var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil) +var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil) + +// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent. 
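Given the registry implementation above and the constructors that follow, usage of the equivalent-resource mapper might look like this sketch; the resource and kind names are only examples:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	reg := runtime.NewEquivalentResourceRegistry()

	// Register two versions of the same resource under the main (empty) subresource.
	reg.RegisterKindFor(
		schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, "",
		schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	reg.RegisterKindFor(
		schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "deployments"}, "",
		schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"})

	// Every registered version of apps/deployments is reported as equivalent,
	// including the one being queried.
	fmt.Println(reg.EquivalentResourcesFor(
		schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, ""))

	// KindFor recovers the expected kind for a given resource version.
	fmt.Println(reg.KindFor(
		schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "deployments"}, ""))
}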
+func NewEquivalentResourceRegistry() EquivalentResourceRegistry { + return &equivalentResourceRegistry{} +} + +// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function. +// If "" is returned by the function, GroupResource#String is used as the identity. +// GroupResources with the same identity string are considered equivalent. +func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry { + return &equivalentResourceRegistry{keyFunc: keyFunc} +} + +func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.resources[r.keys[resource.GroupResource()]][subresource] +} +func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.kinds[resource][subresource] +} +func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) { + r.mutex.Lock() + defer r.mutex.Unlock() + if r.kinds == nil { + r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{} + } + if r.kinds[resource] == nil { + r.kinds[resource] = map[string]schema.GroupVersionKind{} + } + r.kinds[resource][subresource] = kind + + // get the shared key of the parent resource + key := "" + gr := resource.GroupResource() + if r.keyFunc != nil { + key = r.keyFunc(gr) + } + if key == "" { + key = gr.String() + } + + if r.keys == nil { + r.keys = map[schema.GroupResource]string{} + } + r.keys[gr] = key + + if r.resources == nil { + r.resources = map[string]map[string][]schema.GroupVersionResource{} + } + if r.resources[key] == nil { + r.resources[key] = map[string][]schema.GroupVersionResource{} + } + r.resources[key][subresource] = append(r.resources[key][subresource], resource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go new file mode 100644 index 00000000..159b3012 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go @@ -0,0 +1,146 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NegotiateError is returned when a ClientNegotiator is unable to locate +// a serializer for the requested operation. 
+type NegotiateError struct { + ContentType string + Stream bool +} + +func (e NegotiateError) Error() string { + if e.Stream { + return fmt.Sprintf("no stream serializers registered for %s", e.ContentType) + } + return fmt.Sprintf("no serializers registered for %s", e.ContentType) +} + +type clientNegotiator struct { + serializer NegotiatedSerializer + encode, decode GroupVersioner +} + +func (n *clientNegotiator) Encoder(contentType string, params map[string]string) (Encoder, error) { + // TODO: `pretty=1` is handled in NegotiateOutputMediaType, consider moving it to this method + // if client negotiators truly need to use it + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, NegotiateError{ContentType: contentType} + } + info = mediaTypes[0] + } + return n.serializer.EncoderForVersion(info.Serializer, n.encode), nil +} + +func (n *clientNegotiator) Decoder(contentType string, params map[string]string) (Decoder, error) { + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, NegotiateError{ContentType: contentType} + } + info = mediaTypes[0] + } + return n.serializer.DecoderToVersion(info.Serializer, n.decode), nil +} + +func (n *clientNegotiator) StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) { + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, nil, nil, NegotiateError{ContentType: contentType, Stream: true} + } + info = mediaTypes[0] + } + if info.StreamSerializer == nil { + return nil, nil, nil, NegotiateError{ContentType: info.MediaType, Stream: true} + } + return n.serializer.DecoderToVersion(info.Serializer, n.decode), info.StreamSerializer.Serializer, info.StreamSerializer.Framer, nil +} + +// NewClientNegotiator will attempt to retrieve the appropriate encoder, decoder, or +// stream decoder for a given content type. Does not perform any conversion, but will +// encode the object to the desired group, version, and kind. Use when creating a client. +func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { + return &clientNegotiator{ + serializer: serializer, + encode: gv, + } +} + +// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver +// where objects are converted to gv prior to sending and decoded to their internal representation prior +// to retrieval. +// +// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release. +func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { + decode := schema.GroupVersions{ + { + Group: gv.Group, + Version: APIVersionInternal, + }, + // always include the legacy group as a decoding target to handle non-error `Status` return types + { + Group: "", + Version: APIVersionInternal, + }, + } + return &clientNegotiator{ + encode: gv, + decode: decode, + serializer: serializer, + } +} + +// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used +// for testing or when the caller is taking responsibility for setting the GVK on encoded objects. 
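The constructors above (NewClientNegotiator and friends) are what REST clients use to pick codecs by content type; the simple variant defined next is intended for tests. A sketch of the lookup flow, assuming client-go's scheme.Codecs and its WithoutConversion helper, neither of which is part of this diff:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// Build a negotiator the way a client would, targeting the v1 group version for encoding.
	neg := runtime.NewClientNegotiator(scheme.Codecs.WithoutConversion(), schema.GroupVersion{Version: "v1"})

	// Resolve a decoder from a response Content-Type.
	if _, err := neg.Decoder("application/json", nil); err != nil {
		panic(err)
	}

	// Unknown content types surface a NegotiateError instead of a nil decoder.
	if _, err := neg.Decoder("application/x-unknown", nil); err != nil {
		fmt.Println(err) // no serializers registered for application/x-unknown
	}
}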
+func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator { + return &clientNegotiator{ + serializer: &simpleNegotiatedSerializer{info: info}, + encode: gv, + } +} + +type simpleNegotiatedSerializer struct { + info SerializerInfo +} + +func NewSimpleNegotiatedSerializer(info SerializerInfo) NegotiatedSerializer { + return &simpleNegotiatedSerializer{info: info} +} + +func (n *simpleNegotiatedSerializer) SupportedMediaTypes() []SerializerInfo { + return []SerializerInfo{n.info} +} + +func (n *simpleNegotiatedSerializer) EncoderForVersion(e Encoder, _ GroupVersioner) Encoder { + return e +} + +func (n *simpleNegotiatedSerializer) DecoderToVersion(d Decoder, _gv GroupVersioner) Decoder { + return d +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go index eeb380c3..1cd2e4c3 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/register.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -29,33 +29,3 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } - -// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind -// interface if no objects are provided, or the ObjectKind interface of the object in the -// highest array position. -func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind { - last := obj.Last() - if last == nil { - return schema.EmptyObjectKind - } - return last.GetObjectKind() -} - -// First returns the leftmost object in the VersionedObjects array, which is usually the -// object as serialized on the wire. -func (obj *VersionedObjects) First() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[0] -} - -// Last is the rightmost object in the VersionedObjects array, which is the object after -// all transformations have been applied. This is the same object that would be returned -// by Decode in a normal invocation (without VersionedObjects in the into argument). -func (obj *VersionedObjects) Last() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[len(obj.Objects)-1] -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 5c9934c7..a7276649 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -14,23 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto -// DO NOT EDIT! -/* -Package schema is a generated protocol buffer package. +package schema -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +import ( + fmt "fmt" -It has these top-level messages: -*/ -package schema + math "math" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -44,10 +39,10 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_0462724132518e0d = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 5f02961d..63610331 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -66,7 +66,7 @@ func (gr GroupResource) Empty() bool { return len(gr.Group) == 0 && len(gr.Resource) == 0 } -func (gr *GroupResource) String() string { +func (gr GroupResource) String() string { if len(gr.Group) == 0 { return gr.Resource } @@ -111,7 +111,7 @@ func (gvr GroupVersionResource) GroupVersion() GroupVersion { return GroupVersion{Group: gvr.Group, Version: gvr.Version} } -func (gvr *GroupVersionResource) String() string { +func (gvr GroupVersionResource) String() string { return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -130,7 +130,7 @@ func (gk GroupKind) WithVersion(version string) GroupVersionKind { return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} } -func (gk *GroupKind) String() string { +func (gk GroupKind) String() string { if len(gk.Group) == 0 { return gk.Kind } @@ -191,6 +191,11 @@ func (gv GroupVersion) String() string { return gv.Version } +// Identifier implements runtime.GroupVersioner interface. +func (gv GroupVersion) Identifier() string { + return gv.String() +} + // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. @@ -246,6 +251,15 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // in fewer places. type GroupVersions []GroupVersion +// Identifier implements runtime.GroupVersioner interface. +func (gv GroupVersions) Identifier() string { + groupVersions := make([]string, 0, len(gv)) + for i := range gv { + groupVersions = append(groupVersions, gv[i].String()) + } + return fmt.Sprintf("[%s]", strings.Join(groupVersions, ",")) +} + // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { @@ -281,8 +295,8 @@ func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersio // ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that // do not use TypeMeta. 
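The new Identifier methods give GroupVersion and GroupVersions stable string identities, which the encoder-caching code later in this patch uses as map keys. A small sketch with arbitrary values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gv := schema.GroupVersion{Group: "apps", Version: "v1"}
	fmt.Println(gv.Identifier()) // "apps/v1" (same as String())

	gvs := schema.GroupVersions{gv, {Version: "v1"}}
	fmt.Println(gvs.Identifier()) // "[apps/v1,v1]"
}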
-func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { - if gvk == nil { +func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk.Empty() { return "", "" } return gvk.GroupVersion().String(), gvk.Kind diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 65f45112..f21b0ef1 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -17,9 +17,13 @@ limitations under the License. package serializer import ( + "mime" + "strings" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) @@ -44,30 +48,53 @@ type serializerType struct { StreamSerializer runtime.Serializer } -func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { - jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) - jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) - yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType { + jsonSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, + ) + jsonSerializerType := serializerType{ + AcceptContentTypes: []string{runtime.ContentTypeJSON}, + ContentType: runtime.ContentTypeJSON, + FileExtensions: []string{"json"}, + EncodesAsText: true, + Serializer: jsonSerializer, + + Framer: json.Framer, + StreamSerializer: jsonSerializer, + } + if options.Pretty { + jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict}, + ) + } - serializers := []serializerType{ - { - AcceptContentTypes: []string{"application/json"}, - ContentType: "application/json", - FileExtensions: []string{"json"}, - EncodesAsText: true, - Serializer: jsonSerializer, - PrettySerializer: jsonPrettySerializer, + yamlSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, + ) + protoSerializer := protobuf.NewSerializer(scheme, scheme) + protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) - Framer: json.Framer, - StreamSerializer: jsonSerializer, - }, + serializers := []serializerType{ + jsonSerializerType, { - AcceptContentTypes: []string{"application/yaml"}, - ContentType: "application/yaml", + AcceptContentTypes: []string{runtime.ContentTypeYAML}, + ContentType: runtime.ContentTypeYAML, FileExtensions: []string{"yaml"}, EncodesAsText: true, Serializer: yamlSerializer, }, + { + AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, + ContentType: runtime.ContentTypeProtobuf, + FileExtensions: []string{"pb"}, + Serializer: protoSerializer, + + Framer: protobuf.LengthDelimitedFramer, + StreamSerializer: protoRawSerializer, + }, } for _, fn := range serializerExtensions { @@ -89,14 +116,56 @@ type CodecFactory struct { legacySerializer runtime.Serializer } +// CodecFactoryOptions holds the options for configuring CodecFactory behavior +type CodecFactoryOptions struct { + // 
Strict configures all serializers in strict mode + Strict bool + // Pretty includes a pretty serializer along with the non-pretty one + Pretty bool +} + +// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it. +// Functions implementing this type can be passed to the NewCodecFactory() constructor. +type CodecFactoryOptionsMutator func(*CodecFactoryOptions) + +// EnablePretty enables including a pretty serializer along with the non-pretty one +func EnablePretty(options *CodecFactoryOptions) { + options.Pretty = true +} + +// DisablePretty disables including a pretty serializer along with the non-pretty one +func DisablePretty(options *CodecFactoryOptions) { + options.Pretty = false +} + +// EnableStrict enables configuring all serializers in strict mode +func EnableStrict(options *CodecFactoryOptions) { + options.Strict = true +} + +// DisableStrict disables configuring all serializers in strict mode +func DisableStrict(options *CodecFactoryOptions) { + options.Strict = false +} + // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and // only convert objects which are shared internally (Status, common API machinery). +// +// Mutators can be passed to change the CodecFactoryOptions before construction of the factory. +// It is recommended to explicitly pass mutators instead of relying on defaults. +// By default, Pretty is enabled -- this is conformant with previously supported behavior. +// // TODO: allow other codecs to be compiled in? // TODO: accept a scheme interface -func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { - serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) +func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory { + options := CodecFactoryOptions{Pretty: true} + for _, fn := range mutators { + fn(&options) + } + + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options) return newCodecFactory(scheme, serializers) } @@ -120,6 +189,15 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec Serializer: d.Serializer, PrettySerializer: d.PrettySerializer, } + + mediaType, _, err := mime.ParseMediaType(info.MediaType) + if err != nil { + panic(err) + } + parts := strings.SplitN(mediaType, "/", 2) + info.MediaTypeType = parts[0] + info.MediaTypeSubType = parts[1] + if d.StreamSerializer != nil { info.StreamSerializer = &runtime.StreamSerializerInfo{ Serializer: d.StreamSerializer, @@ -148,6 +226,12 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec } } +// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the +// caller requests it. +func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer { + return WithoutConversionCodecFactory{f} +} + // SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { return f.accepts @@ -215,23 +299,26 @@ func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.Grou return f.CodecForVersions(encoder, nil, gv, nil) } -// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion. 
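The mutators above are passed variadically to NewCodecFactory. A minimal sketch of building a strict, non-pretty factory and listing its media types; only the constructors shown in this hunk are assumed.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()

	// Pretty defaults to true; pass mutators to override the defaults explicitly.
	codecs := serializer.NewCodecFactory(scheme, serializer.EnableStrict, serializer.DisablePretty)

	for _, info := range codecs.SupportedMediaTypes() {
		// Expected: application/json, application/yaml, application/vnd.kubernetes.protobuf
		fmt.Println(info.MediaType, info.MediaTypeType, info.MediaTypeSubType)
	}
}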
-type DirectCodecFactory struct { +// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion. +// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future +// will be unnecessary when we change the signature of NegotiatedSerializer. +type WithoutConversionCodecFactory struct { CodecFactory } -// EncoderForVersion returns an encoder that does not do conversion. -func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { - return versioning.DirectEncoder{ +// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object +// when serialized. +func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { + return runtime.WithVersionEncoder{ Version: version, Encoder: serializer, ObjectTyper: f.CodecFactory.scheme, } } -// DecoderToVersion returns an decoder that does not do conversion. gv is ignored. -func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return versioning.DirectDecoder{ +// DecoderToVersion returns an decoder that does not do conversion. +func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return runtime.WithoutVersionDecoder{ Decoder: serializer, } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 382c4858..9d17f09e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -22,47 +22,87 @@ import ( "strconv" "unsafe" - "github.com/ghodss/yaml" jsoniter "github.com/json-iterator/go" "github.com/modern-go/reflect2" + "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/util/framer" utilyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog" ) // NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer // is not nil, the object has the group, version, and kind fields set. +// Deprecated: use NewSerializerWithOptions instead. func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - yaml: false, - pretty: pretty, - } + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) } // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer // is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that // matches JSON, and will error if constructs are used that do not serialize to JSON. +// Deprecated: use NewSerializerWithOptions instead. func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) +} + +// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML +// form. 
If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer +// and are immutable. +func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer { return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - yaml: true, + meta: meta, + creater: creater, + typer: typer, + options: options, + identifier: identifier(options), + } +} + +// identifier computes Identifier of Encoder based on the given options. +func identifier(options SerializerOptions) runtime.Identifier { + result := map[string]string{ + "name": "json", + "yaml": strconv.FormatBool(options.Yaml), + "pretty": strconv.FormatBool(options.Pretty), } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err) + } + return runtime.Identifier(identifier) +} + +// SerializerOptions holds the options which are used to configure a JSON/YAML serializer. +// example: +// (1) To configure a JSON serializer, set `Yaml` to `false`. +// (2) To configure a YAML serializer, set `Yaml` to `true`. +// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`. +type SerializerOptions struct { + // Yaml: configures the Serializer to work with JSON(false) or YAML(true). + // When `Yaml` is enabled, this serializer only supports the subset of YAML that + // matches JSON, and will error if constructs are used that do not serialize to JSON. + Yaml bool + + // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output. + // This option is silently ignored when `Yaml` is `true`. + Pretty bool + + // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. + // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. + Strict bool } type Serializer struct { meta MetaFactory + options SerializerOptions creater runtime.ObjectCreater typer runtime.ObjectTyper - yaml bool - pretty bool + + identifier runtime.Identifier } // Serializer implements Serializer @@ -119,11 +159,28 @@ func CaseSensitiveJsonIterator() jsoniter.API { return config } -// Private copy of jsoniter to try to shield against possible mutations +// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with +// the encoding/json standard library. +func StrictCaseSensitiveJsonIterator() jsoniter.API { + config := jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + DisallowUnknownFields: true, + }.Froze() + // Force jsoniter to decode number to interface{} via int64/float64, if possible. + config.RegisterExtension(&customNumberExtension{}) + return config +} + +// Private copies of jsoniter to try to shield against possible mutations // from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them // in some other library will mess with every usage of the jsoniter library in the whole program. 
// See https://github.com/json-iterator/go/issues/265 var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() +var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { @@ -149,18 +206,8 @@ func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVer // On success or most errors, the method will return the calculated schema kind. // The gvk calculate priority will be originalData > default gvk > into func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - if versioned, ok := into.(*runtime.VersionedObjects); ok { - into = versioned.Last() - obj, actual, err := s.Decode(originalData, gvk, into) - if err != nil { - return nil, actual, err - } - versioned.Objects = []runtime.Object{obj} - return versioned, actual, nil - } - data := originalData - if s.yaml { + if s.options.Yaml { altered, err := yaml.YAMLToJSON(data) if err != nil { return nil, nil, err @@ -216,12 +263,45 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } + + // If the deserializer is non-strict, return successfully here. + if !s.options.Strict { + return obj, actual, nil + } + + // In strict mode pass the data trough the YAMLToJSONStrict converter. + // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data, + // the output would equal the input, unless there is a parsing error such as duplicate fields. + // As we know this was successful in the non-strict case, the only error that may be returned here + // is because of the newly-added strictness. hence we know we can return the typed strictDecoderError + // the actual error is that the object contains duplicate fields. + altered, err := yaml.YAMLToJSONStrict(originalData) + if err != nil { + return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + } + // As performance is not an issue for now for the strict deserializer (one has regardless to do + // the unmarshal twice), we take the sanitized, altered data that is guaranteed to have no duplicated + // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is + // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, + // the actual error is that the object contains unknown field. + strictObj := obj.DeepCopyObject() + if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { + return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + } + // Always return the same object as the non-strict serializer to avoid any deviations. return obj, actual, nil } // Encode serializes the provided object to the given writer. 
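The strict path above re-parses the input with YAMLToJSONStrict and a DisallowUnknownFields jsoniter config, so duplicate or unknown fields surface as a strict decoding error. A sketch that triggers it; metav1.Status is used only as a convenient registered type and the duplicate-key payload is illustrative.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme()
	scheme.AddKnownTypes(schema.GroupVersion{Version: "v1"}, &metav1.Status{})

	strict := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: false, Pretty: false, Strict: true})

	// "message" appears twice: the lenient pass succeeds, the strict pass reports the duplicate field.
	data := []byte(`{"apiVersion":"v1","kind":"Status","message":"a","message":"b"}`)
	_, _, err := strict.Decode(data, nil, nil)
	fmt.Println(err)
}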
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if s.yaml { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { + if s.options.Yaml { json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { return err @@ -234,7 +314,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { return err } - if s.pretty { + if s.options.Pretty { data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") if err != nil { return err @@ -246,9 +326,14 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { return encoder.Encode(obj) } +// Identifier implements runtime.Encoder interface. +func (s *Serializer) Identifier() runtime.Identifier { + return s.identifier +} + // RecognizesData implements the RecognizingDecoder interface. func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { - if s.yaml { + if s.options.Yaml { // we could potentially look for '---' return false, true, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index b99ba25c..f606b7d7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -69,27 +69,25 @@ func IsNotMarshalable(err error) bool { // NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer // is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written // as-is (any type info passed with the object will be used). -// -// This encoding scheme is experimental, and is subject to change at any time. -func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer { +func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { return &Serializer{ - prefix: protoEncodingPrefix, - creater: creater, - typer: typer, - contentType: defaultContentType, + prefix: protoEncodingPrefix, + creater: creater, + typer: typer, } } type Serializer struct { - prefix []byte - creater runtime.ObjectCreater - typer runtime.ObjectTyper - contentType string + prefix []byte + creater runtime.ObjectCreater + typer runtime.ObjectTyper } var _ runtime.Serializer = &Serializer{} var _ recognizer.RecognizingDecoder = &Serializer{} +const serializerIdentifier runtime.Identifier = "protobuf" + // Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default // gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, // the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will @@ -97,23 +95,6 @@ var _ recognizer.RecognizingDecoder = &Serializer{} // not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most // errors, the method will return the calculated schema kind. 
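With the defaultContentType argument removed, the protobuf serializers are now constructed from just a creater and typer, and Unknown.ContentType is always runtime.ContentTypeProtobuf. A sketch of a wire-format round trip under the same assumptions as the previous example (metav1.Status standing in as a registered type):

package main

import (
	"bytes"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Version: "v1"}
	scheme.AddKnownTypes(gv, &metav1.Status{})

	s := protobuf.NewSerializer(scheme, scheme) // no content-type argument any more

	in := &metav1.Status{Message: "ok"}
	in.GetObjectKind().SetGroupVersionKind(gv.WithKind("Status"))

	var buf bytes.Buffer
	if err := s.Encode(in, &buf); err != nil {
		panic(err)
	}

	out, gvk, err := s.Decode(buf.Bytes(), nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk.String(), out.(*metav1.Status).Message)
}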
func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - if versioned, ok := into.(*runtime.VersionedObjects); ok { - into = versioned.Last() - obj, actual, err := s.Decode(originalData, gvk, into) - if err != nil { - return nil, actual, err - } - // the last item in versioned becomes into, so if versioned was not originally empty we reset the object - // array so the first position is the decoded object and the second position is the outermost object. - // if there were no objects in the versioned list passed to us, only add ourselves. - if into != nil && into != obj { - versioned.Objects = []runtime.Object{obj, into} - } else { - versioned.Objects = []runtime.Object{obj} - } - return versioned, actual, err - } - prefixLen := len(s.prefix) switch { case len(originalData) == 0: @@ -138,7 +119,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { *intoUnknown = unk if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok { - intoUnknown.ContentType = s.contentType + intoUnknown.ContentType = runtime.ContentTypeProtobuf } return intoUnknown, &actual, nil } @@ -180,6 +161,13 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // Encode serializes the provided object to the given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { prefixSize := uint64(len(s.prefix)) var unk runtime.Unknown @@ -207,7 +195,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalTo methods + // the more efficient Size and MarshalToSizedBuffer methods encodedSize := uint64(t.Size()) estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize) data := make([]byte, estimatedSize) @@ -249,6 +237,11 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { } } +// Identifier implements runtime.Encoder interface. +func (s *Serializer) Identifier() runtime.Identifier { + return serializerIdentifier +} + // RecognizesData implements the RecognizingDecoder interface. func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) { prefix := make([]byte, 4) @@ -287,6 +280,12 @@ type bufferedMarshaller interface { runtime.ProtobufMarshaller } +// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently. +type bufferedReverseMarshaller interface { + proto.Sizer + runtime.ProtobufReverseMarshaller +} + // estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown // object with a nil RawJSON struct and the expected size of the provided buffer. The // returned size will not be correct if RawJSOn is set on unk. @@ -303,24 +302,24 @@ func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 { // encoded object, and thus is not self describing (callers must know what type is being described in order to decode). // // This encoding scheme is experimental, and is subject to change at any time. 
-func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer { +func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer { return &RawSerializer{ - creater: creater, - typer: typer, - contentType: defaultContentType, + creater: creater, + typer: typer, } } // RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying // type). type RawSerializer struct { - creater runtime.ObjectCreater - typer runtime.ObjectTyper - contentType string + creater runtime.ObjectCreater + typer runtime.ObjectTyper } var _ runtime.Serializer = &RawSerializer{} +const rawSerializerIdentifier runtime.Identifier = "raw-protobuf" + // Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default // gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, // the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will @@ -332,20 +331,6 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s) } - if versioned, ok := into.(*runtime.VersionedObjects); ok { - into = versioned.Last() - obj, actual, err := s.Decode(originalData, gvk, into) - if err != nil { - return nil, actual, err - } - if into != nil && into != obj { - versioned.Objects = []runtime.Object{obj, into} - } else { - versioned.Objects = []runtime.Object{obj} - } - return versioned, actual, err - } - if len(originalData) == 0 { // TODO: treat like decoding {} from JSON with defaulting return nil, nil, fmt.Errorf("empty data") @@ -358,7 +343,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { intoUnknown.Raw = data intoUnknown.ContentEncoding = "" - intoUnknown.ContentType = s.contentType + intoUnknown.ContentType = runtime.ContentTypeProtobuf intoUnknown.SetGroupVersionKind(*actual) return intoUnknown, actual, nil } @@ -411,12 +396,35 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, if err := proto.Unmarshal(data, pb); err != nil { return nil, actual, err } + if actual != nil { + obj.GetObjectKind().SetGroupVersionKind(*actual) + } return obj, actual, nil } // Encode serializes the provided object to the given writer. Overrides is ignored. 
func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { + case bufferedReverseMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalToSizedBuffer methods + encodedSize := uint64(t.Size()) + data := make([]byte, encodedSize) + + n, err := t.MarshalToSizedBuffer(data) + if err != nil { + return err + } + _, err = w.Write(data[:n]) + return err + case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement // the more efficient Size and MarshalTo methods @@ -444,6 +452,11 @@ func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { } } +// Identifier implements runtime.Encoder interface. +func (s *RawSerializer) Identifier() runtime.Identifier { + return rawSerializerIdentifier +} + var LengthDelimitedFramer = lengthDelimitedFramer{} type lengthDelimitedFramer struct{} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go deleted file mode 100644 index 545cf78d..00000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serializer - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" -) - -const ( - // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from - // depending on it unintentionally. - // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the - // CodecFactory on initialization. 
- contentTypeProtobuf = "application/vnd.kubernetes.protobuf" -) - -func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) { - serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf) - raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf) - return serializerType{ - AcceptContentTypes: []string{contentTypeProtobuf}, - ContentType: contentTypeProtobuf, - FileExtensions: []string{"pb"}, - Serializer: serializer, - - Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: raw, - }, true -} - -func init() { - serializerExtensions = append(serializerExtensions, protobufSerializer) -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 91fd4ed4..a60a7c04 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -64,7 +64,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { reader: r, decoder: d, buf: make([]byte, 1024), - maxBytes: 1024 * 1024, + maxBytes: 16 * 1024 * 1024, } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index a5ae3ac4..ced184c9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -17,11 +17,15 @@ limitations under the License. package versioning import ( + "encoding/json" "io" + "reflect" + "sync" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" ) // NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. @@ -61,6 +65,8 @@ func NewCodec( encodeVersion: encodeVersion, decodeVersion: decodeVersion, + identifier: identifier(encodeVersion, encoder), + originalSchemeName: originalSchemeName, } return internal @@ -77,69 +83,86 @@ type codec struct { encodeVersion runtime.GroupVersioner decodeVersion runtime.GroupVersioner + identifier runtime.Identifier + // originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates originalSchemeName string } +var identifiersMap sync.Map + +type codecIdentifier struct { + EncodeGV string `json:"encodeGV,omitempty"` + Encoder string `json:"encoder,omitempty"` + Name string `json:"name,omitempty"` +} + +// identifier computes Identifier of Encoder based on codec parameters. +func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier { + result := codecIdentifier{ + Name: "versioning", + } + + if encodeGV != nil { + result.EncodeGV = encodeGV.Identifier() + } + if encoder != nil { + result.Encoder = string(encoder.Identifier()) + } + if id, ok := identifiersMap.Load(result); ok { + return id.(runtime.Identifier) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for codec: %v", err) + } + identifiersMap.Store(result, runtime.Identifier(identifier)) + return runtime.Identifier(identifier) +} + // Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is // successful, the returned runtime.Object will be the value passed as into. 
Note that this may bypass conversion if you pass an // into that matches the serialized version. func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - versioned, isVersioned := into.(*runtime.VersionedObjects) - if isVersioned { - into = versioned.Last() + // If the into object is unstructured and expresses an opinion about its group/version, + // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`) + decodeInto := into + if into != nil { + if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() { + decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object) + } } - obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) + obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) if err != nil { return nil, gvk, err } if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil { + if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { return nil, gvk, err } } // if we specify a target, use generic conversion. if into != nil { - if into == obj { - if isVersioned { - return versioned, gvk, nil - } - return into, gvk, nil - } - // perform defaulting if requested if c.defaulter != nil { - // create a copy to ensure defaulting is not applied to the original versioned objects - if isVersioned { - versioned.Objects = []runtime.Object{obj.DeepCopyObject()} - } c.defaulter.Default(obj) - } else { - if isVersioned { - versioned.Objects = []runtime.Object{obj} - } + } + + // Short-circuit conversion if the into object is same object + if into == obj { + return into, gvk, nil } if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { return nil, gvk, err } - if isVersioned { - versioned.Objects = append(versioned.Objects, into) - return versioned, gvk, nil - } return into, gvk, nil } - // Convert if needed. - if isVersioned { - // create a copy, because ConvertToVersion does not guarantee non-mutation of objects - versioned.Objects = []runtime.Object{obj.DeepCopyObject()} - } - // perform defaulting if requested if c.defaulter != nil { c.defaulter.Default(obj) @@ -149,18 +172,19 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru if err != nil { return nil, gvk, err } - if isVersioned { - if versioned.Last() != out { - versioned.Objects = append(versioned.Objects, out) - } - return versioned, gvk, nil - } return out, gvk, nil } // Encode ensures the provided object is output in the appropriate group and version, invoking // conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. 
func (c *codec) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(c.Identifier(), c.doEncode, w) + } + return c.doEncode(obj, w) +} + +func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { switch obj := obj.(type) { case *runtime.Unknown: return c.encoder.Encode(obj, w) @@ -189,84 +213,38 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { return err } + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + // restore the old GVK after encoding + defer objectKind.SetGroupVersionKind(old) + if c.encodeVersion == nil || isUnversioned { if e, ok := obj.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() objectKind.SetGroupVersionKind(gvks[0]) - err = c.encoder.Encode(obj, w) - objectKind.SetGroupVersionKind(old) - return err + return c.encoder.Encode(obj, w) } // Perform a conversion if necessary - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion) if err != nil { return err } if e, ok := out.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object - err = c.encoder.Encode(out, w) - // restore the old GVK, in case conversion returned the same object - objectKind.SetGroupVersionKind(old) - return err + return c.encoder.Encode(out, w) } -// DirectEncoder serializes an object and ensures the GVK is set. -type DirectEncoder struct { - Version runtime.GroupVersioner - runtime.Encoder - runtime.ObjectTyper -} - -// Encode does not do conversion. It sets the gvk during serialization. -func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error { - gvks, _, err := e.ObjectTyper.ObjectKinds(obj) - if err != nil { - if runtime.IsNotRegisteredError(err) { - return e.Encoder.Encode(obj, stream) - } - return err - } - kind := obj.GetObjectKind() - oldGVK := kind.GroupVersionKind() - gvk := gvks[0] - if e.Version != nil { - preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) - if ok { - gvk = preferredGVK - } - } - kind.SetGroupVersionKind(gvk) - err = e.Encoder.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err -} - -// DirectDecoder clears the group version kind of a deserialized object. -type DirectDecoder struct { - runtime.Decoder -} - -// Decode does not do conversion. It removes the gvk during deserialization. -func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - obj, gvk, err := d.Decoder.Decode(data, defaults, into) - if obj != nil { - kind := obj.GetObjectKind() - // clearing the gvk is just a convention of a codec - kind.SetGroupVersionKind(schema.GroupVersionKind{}) - } - return obj, gvk, err +// Identifier implements runtime.Encoder interface. 
+func (c *codec) Identifier() runtime.Identifier { + return c.identifier } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index e4515d8e..31359f35 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -41,7 +41,9 @@ type TypeMeta struct { } const ( - ContentTypeJSON string = "application/json" + ContentTypeJSON string = "application/json" + ContentTypeYAML string = "application/yaml" + ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" ) // RawExtension is used to hold extensions in external versions. @@ -93,7 +95,7 @@ type RawExtension struct { // Raw is the underlying serialization of this object. // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` // Object can hold a representation of this extension - useful for working with versioned // structs. Object Object `json:"-"` @@ -122,16 +124,3 @@ type Unknown struct { // Unspecified means ContentTypeJSON. ContentType string `protobuf:"bytes,4,opt,name=contentType"` } - -// VersionedObjects is used by Decoders to give callers a way to access all versions -// of an object during the decoding process. -// -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:deepcopy-gen=true -type VersionedObjects struct { - // Objects is the set of objects retrieved during decoding, in order of conversion. - // The 0 index is the object as serialized on the wire. If conversion has occurred, - // other objects may be present. The right most object is the same as would be returned - // by a normal Decode call. - Objects []Object -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index ead96ee0..a82227b2 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -24,46 +24,66 @@ type ProtobufMarshaller interface { MarshalTo(data []byte) (int, error) } +type ProtobufReverseMarshaller interface { + MarshalToSizedBuffer(data []byte) (int, error) +} + // NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown -// that will contain an object that implements ProtobufMarshaller. +// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + // Calculate the full size of the message. + msgSize := m.Size() + if b != nil { + msgSize += int(size) + sovGenerated(size) + 1 } - i += n1 + // Reverse marshal the fields of m. 
+ i := msgSize + i -= len(m.ContentType) + copy(data[i:], m.ContentType) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i-- + data[i] = 0x22 + i -= len(m.ContentEncoding) + copy(data[i:], m.ContentEncoding) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i-- + data[i] = 0x1a if b != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, size) - n2, err := b.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - if uint64(n2) != size { - // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto - // struct returned would be wrong. - return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + if r, ok := b.(ProtobufReverseMarshaller); ok { + n1, err := r.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= int(size) + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of LashramOt, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } + } else { + i -= int(size) + n1, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } } - i += n2 + i = encodeVarintGenerated(data, i, size) + i-- + data[i] = 0x12 } - - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i += copy(data[i:], m.ContentEncoding) - - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i += copy(data[i:], m.ContentType) - return i, nil + n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= n2 + i = encodeVarintGenerated(data, i, uint64(n2)) + i-- + data[i] = 0xa + return msgSize - i, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index 8b9182f3..b0393839 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -73,36 +73,3 @@ func (in *Unknown) DeepCopyObject() Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) { - *out = *in - if in.Objects != nil { - in, out := &in.Objects, &out.Objects - *out = make([]Object, len(*in)) - for i := range *in { - if (*in)[i] != nil { - (*out)[i] = (*in)[i].DeepCopyObject() - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects. -func (in *VersionedObjects) DeepCopy() *VersionedObjects { - if in == nil { - return nil - } - out := new(VersionedObjects) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. 
-func (in *VersionedObjects) DeepCopyObject() Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go index d522d1db..fe8ecaaf 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -25,4 +25,5 @@ const ( JSONPatchType PatchType = "application/json-patch+json" MergePatchType PatchType = "application/merge-patch+json" StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" + ApplyPatchType PatchType = "application/apply-patch+yaml" ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go deleted file mode 100644 index 9a09fe54..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "sync" -) - -const ( - shardsCount int = 32 -) - -type Cache []*cacheShard - -func NewCache(maxSize int) Cache { - if maxSize < shardsCount { - maxSize = shardsCount - } - cache := make(Cache, shardsCount) - for i := 0; i < shardsCount; i++ { - cache[i] = &cacheShard{ - items: make(map[uint64]interface{}), - maxSize: maxSize / shardsCount, - } - } - return cache -} - -func (c Cache) getShard(index uint64) *cacheShard { - return c[index%uint64(shardsCount)] -} - -// Returns true if object already existed, false otherwise. -func (c *Cache) Add(index uint64, obj interface{}) bool { - return c.getShard(index).add(index, obj) -} - -func (c *Cache) Get(index uint64) (obj interface{}, found bool) { - return c.getShard(index).get(index) -} - -type cacheShard struct { - items map[uint64]interface{} - sync.RWMutex - maxSize int -} - -// Returns true if object already existed, false otherwise. -func (s *cacheShard) add(index uint64, obj interface{}) bool { - s.Lock() - defer s.Unlock() - _, isOverwrite := s.items[index] - if !isOverwrite && len(s.items) >= s.maxSize { - var randomKey uint64 - for randomKey = range s.items { - break - } - delete(s.items, randomKey) - } - s.items[index] = obj - return isOverwrite -} - -func (s *cacheShard) get(index uint64) (obj interface{}, found bool) { - s.RLock() - defer s.RUnlock() - obj, found = s.items[index] - return -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go new file mode 100644 index 00000000..84b4f588 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -0,0 +1,192 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "container/heap" + "sync" + "time" + + utilclock "k8s.io/apimachinery/pkg/util/clock" +) + +// NewExpiring returns an initialized expiring cache. +func NewExpiring() *Expiring { + return NewExpiringWithClock(utilclock.RealClock{}) +} + +// NewExpiringWithClock is like NewExpiring but allows passing in a custom +// clock for testing. +func NewExpiringWithClock(clock utilclock.Clock) *Expiring { + return &Expiring{ + clock: clock, + cache: make(map[interface{}]entry), + } +} + +// Expiring is a map whose entries expire after a per-entry timeout. +type Expiring struct { + clock utilclock.Clock + + // mu protects the below fields + mu sync.RWMutex + // cache is the internal map that backs the cache. + cache map[interface{}]entry + // generation is used as a cheap resource version for cache entries. Cleanups + // are scheduled with a key and generation. When the cleanup runs, it first + // compares its generation with the current generation of the entry. It + // deletes the entry iff the generation matches. This prevents cleanups + // scheduled for earlier versions of an entry from deleting later versions of + // an entry when Set() is called multiple times with the same key. + // + // The integer value of the generation of an entry is meaningless. + generation uint64 + + heap expiringHeap +} + +type entry struct { + val interface{} + expiry time.Time + generation uint64 +} + +// Get looks up an entry in the cache. +func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { + c.mu.RLock() + defer c.mu.RUnlock() + e, ok := c.cache[key] + if !ok || !c.clock.Now().Before(e.expiry) { + return nil, false + } + return e.val, true +} + +// Set sets a key/value/expiry entry in the map, overwriting any previous entry +// with the same key. The entry expires at the given expiry time, but its TTL +// may be lengthened or shortened by additional calls to Set(). Garbage +// collection of expired entries occurs during calls to Set(), however calls to +// Get() will not return expired entries that have not yet been garbage +// collected. +func (c *Expiring) Set(key interface{}, val interface{}, ttl time.Duration) { + now := c.clock.Now() + expiry := now.Add(ttl) + + c.mu.Lock() + defer c.mu.Unlock() + + c.generation++ + + c.cache[key] = entry{ + val: val, + expiry: expiry, + generation: c.generation, + } + + // Run GC inline before pushing the new entry. + c.gc(now) + + heap.Push(&c.heap, &expiringHeapEntry{ + key: key, + expiry: expiry, + generation: c.generation, + }) +} + +// Delete deletes an entry in the map. +func (c *Expiring) Delete(key interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.del(key, 0) +} + +// del deletes the entry for the given key. The generation argument is the +// generation of the entry that should be deleted. If the generation has been +// changed (e.g. if a set has occurred on an existing element but the old +// cleanup still runs), this is a noop. If the generation argument is 0, the +// entry's generation is ignored and the entry is deleted. +// +// del must be called under the write lock. 
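The Expiring cache added here evicts lazily: Get hides expired entries and Set garbage-collects them via the heap. A small sketch using the fake clock from k8s.io/apimachinery/pkg/util/clock to make expiry observable; the key and TTL are arbitrary.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fakeClock := clock.NewFakeClock(time.Now())
	c := cache.NewExpiringWithClock(fakeClock)

	c.Set("token", "abc123", 2*time.Second)
	if v, ok := c.Get("token"); ok {
		fmt.Println("hit:", v)
	}

	// Advance the fake clock past the TTL; the entry is no longer visible to Get.
	fakeClock.Step(3 * time.Second)
	if _, ok := c.Get("token"); !ok {
		fmt.Println("expired")
	}

	// Len still counts the stale entry until a later Set garbage-collects it.
	fmt.Println("len:", c.Len())
}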
+func (c *Expiring) del(key interface{}, generation uint64) { + e, ok := c.cache[key] + if !ok { + return + } + if generation != 0 && generation != e.generation { + return + } + delete(c.cache, key) +} + +// Len returns the number of items in the cache. +func (c *Expiring) Len() int { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.cache) +} + +func (c *Expiring) gc(now time.Time) { + for { + // Return from gc if the heap is empty or the next element is not yet + // expired. + // + // heap[0] is a peek at the next element in the heap, which is not obvious + // from looking at the (*expiringHeap).Pop() implmentation below. + // heap.Pop() swaps the first entry with the last entry of the heap, then + // calls (*expiringHeap).Pop() which returns the last element. + if len(c.heap) == 0 || now.Before(c.heap[0].expiry) { + return + } + cleanup := heap.Pop(&c.heap).(*expiringHeapEntry) + c.del(cleanup.key, cleanup.generation) + } +} + +type expiringHeapEntry struct { + key interface{} + expiry time.Time + generation uint64 +} + +// expiringHeap is a min-heap ordered by expiration time of its entries. The +// expiring cache uses this as a priority queue to efficiently organize entries +// which will be garbage collected once they expire. +type expiringHeap []*expiringHeapEntry + +var _ heap.Interface = &expiringHeap{} + +func (cq expiringHeap) Len() int { + return len(cq) +} + +func (cq expiringHeap) Less(i, j int) bool { + return cq[i].expiry.Before(cq[j].expiry) +} + +func (cq expiringHeap) Swap(i, j int) { + cq[i], cq[j] = cq[j], cq[i] +} + +func (cq *expiringHeap) Push(c interface{}) { + *cq = append(*cq, c.(*expiringHeapEntry)) +} + +func (cq *expiringHeap) Pop() interface{} { + c := (*cq)[cq.Len()-1] + *cq = (*cq)[:cq.Len()-1] + return c +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 9567f900..1689e62e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -21,11 +21,18 @@ import ( "time" ) +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + // Clock allows for injecting fake or real clocks into code that // needs to do arbitrary things based on time. type Clock interface { - Now() time.Time - Since(time.Time) time.Duration + PassiveClock After(time.Duration) <-chan time.Time NewTimer(time.Duration) Timer Sleep(time.Duration) @@ -66,10 +73,15 @@ func (RealClock) Sleep(d time.Duration) { time.Sleep(d) } -// FakeClock implements Clock, but returns an arbitrary time. -type FakeClock struct { +// FakePassiveClock implements PassiveClock, but returns an arbitrary time. +type FakePassiveClock struct { lock sync.RWMutex time time.Time +} + +// FakeClock implements Clock, but returns an arbitrary time. +type FakeClock struct { + FakePassiveClock // waiters are waiting for the fake time to pass their specified time waiters []fakeClockWaiter @@ -80,29 +92,41 @@ type fakeClockWaiter struct { stepInterval time.Duration skipIfBlocked bool destChan chan time.Time - fired bool +} + +func NewFakePassiveClock(t time.Time) *FakePassiveClock { + return &FakePassiveClock{ + time: t, + } } func NewFakeClock(t time.Time) *FakeClock { return &FakeClock{ - time: t, + FakePassiveClock: *NewFakePassiveClock(t), } } // Now returns f's time. 
-func (f *FakeClock) Now() time.Time { +func (f *FakePassiveClock) Now() time.Time { f.lock.RLock() defer f.lock.RUnlock() return f.time } // Since returns time since the time in f. -func (f *FakeClock) Since(ts time.Time) time.Duration { +func (f *FakePassiveClock) Since(ts time.Time) time.Duration { f.lock.RLock() defer f.lock.RUnlock() return f.time.Sub(ts) } +// Sets the time. +func (f *FakePassiveClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.time = t +} + // Fake version of time.After(d). func (f *FakeClock) After(d time.Duration) <-chan time.Time { f.lock.Lock() @@ -175,12 +199,10 @@ func (f *FakeClock) setTimeLocked(t time.Time) { if w.skipIfBlocked { select { case w.destChan <- t: - w.fired = true default: } } else { w.destChan <- t - w.fired = true } if w.stepInterval > 0 { @@ -287,36 +309,50 @@ func (f *fakeTimer) C() <-chan time.Time { return f.waiter.destChan } -// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +// Stop conditionally stops the timer. If the timer has neither fired +// nor been stopped then this call stops the timer and returns true, +// otherwise this call returns false. This is like time.Timer::Stop. func (f *fakeTimer) Stop() bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - - newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters)) - for i := range f.fakeClock.waiters { - w := &f.fakeClock.waiters[i] - if w != &f.waiter { - newWaiters = append(newWaiters, *w) + // The timer has already fired or been stopped, unless it is found + // among the clock's waiters. + stopped := false + oldWaiters := f.fakeClock.waiters + newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) + seekChan := f.waiter.destChan + for i := range oldWaiters { + // Identify the timer's fakeClockWaiter by the identity of the + // destination channel, nothing else is necessarily unique and + // constant since the timer's creation. + if oldWaiters[i].destChan == seekChan { + stopped = true + } else { + newWaiters = append(newWaiters, oldWaiters[i]) } } f.fakeClock.waiters = newWaiters - return !f.waiter.fired + return stopped } -// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet -// fired, or false otherwise. +// Reset conditionally updates the firing time of the timer. If the +// timer has neither fired nor been stopped then this call resets the +// timer to the fake clock's "now" + d and returns true, otherwise +// this call returns false. This is like time.Timer::Reset. 
func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - - active := !f.waiter.fired - - f.waiter.fired = false - f.waiter.targetTime = f.fakeClock.time.Add(d) - - return active + waiters := f.fakeClock.waiters + seekChan := f.waiter.destChan + for i := range waiters { + if waiters[i].destChan == seekChan { + waiters[i].targetTime = f.fakeClock.time.Add(d) + return true + } + } + return false } type Ticker interface { diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index 06042617..fa9ffa51 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -18,16 +18,13 @@ package diff import ( "bytes" - "encoding/json" "fmt" "reflect" - "sort" "strings" "text/tabwriter" "github.com/davecgh/go-spew/spew" - - "k8s.io/apimachinery/pkg/util/validation/field" + "github.com/google/go-cmp/cmp" ) // StringDiff diffs a and b and returns a human readable diff. @@ -50,220 +47,29 @@ func StringDiff(a, b string) string { return string(out) } -// ObjectDiff writes the two objects out as JSON and prints out the identical part of -// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. -// For debugging tests. +func legacyDiff(a, b interface{}) string { + return cmp.Diff(a, b) +} + +// ObjectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectDiff(a, b interface{}) string { - ab, err := json.Marshal(a) - if err != nil { - panic(fmt.Sprintf("a: %v", err)) - } - bb, err := json.Marshal(b) - if err != nil { - panic(fmt.Sprintf("b: %v", err)) - } - return StringDiff(string(ab), string(bb)) + return legacyDiff(a, b) } -// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, -// which shows absolutely everything by recursing into every single pointer -// (go's %#v formatters OTOH stop at a certain point). This is needed when you -// can't figure out why reflect.DeepEqual is returning false and nothing is -// showing you differences. This will. +// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectGoPrintDiff(a, b interface{}) string { - s := spew.ConfigState{DisableMethods: true} - return StringDiff( - s.Sprintf("%#v", a), - s.Sprintf("%#v", b), - ) + return legacyDiff(a, b) } +// ObjectReflectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectReflectDiff(a, b interface{}) string { - vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) - if vA.Type() != vB.Type() { - return fmt.Sprintf("type A %T and type B %T do not match", a, b) - } - diffs := objectReflectDiff(field.NewPath("object"), vA, vB) - if len(diffs) == 0 { - return "" - } - out := []string{""} - for _, d := range diffs { - elidedA, elidedB := limit(d.a, d.b, 80) - out = append(out, - fmt.Sprintf("%s:", d.path), - fmt.Sprintf(" a: %s", elidedA), - fmt.Sprintf(" b: %s", elidedB), - ) - } - return strings.Join(out, "\n") -} - -// limit: -// 1. stringifies aObj and bObj -// 2. elides identical prefixes if either is too long -// 3. 
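Stepping back to the fake clock changes above, a test-style sketch (illustrative only) of the revised Stop/Reset semantics: a timer counts as active only while its destination channel is still among the clock's waiters. This assumes the package's NewFakeClock constructor and the Timer returned by FakeClock.NewTimer in the vendored k8s.io/apimachinery/pkg/util/clock.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Now())

	t := fc.NewTimer(5 * time.Second)
	fmt.Println(t.Stop())             // true: the timer was pending and is now stopped
	fmt.Println(t.Stop())             // false: it is no longer among the clock's waiters
	fmt.Println(t.Reset(time.Second)) // false: a stopped timer cannot be re-armed

	t2 := fc.NewTimer(5 * time.Second)
	fmt.Println(t2.Reset(time.Minute)) // true: still pending, target time moved to now+1m
}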
elides remaining content from the end if either is too long -func limit(aObj, bObj interface{}, max int) (string, string) { - elidedPrefix := "" - elidedASuffix := "" - elidedBSuffix := "" - a, b := fmt.Sprintf("%#v", aObj), fmt.Sprintf("%#v", bObj) - - if aObj != nil && bObj != nil { - if aType, bType := fmt.Sprintf("%T", aObj), fmt.Sprintf("%T", bObj); aType != bType { - a = fmt.Sprintf("%s (%s)", a, aType) - b = fmt.Sprintf("%s (%s)", b, bType) - } - } - - for { - switch { - case len(a) > max && len(a) > 4 && len(b) > 4 && a[:4] == b[:4]: - // a is too long, b has data, and the first several characters are the same - elidedPrefix = "..." - a = a[2:] - b = b[2:] - - case len(b) > max && len(b) > 4 && len(a) > 4 && a[:4] == b[:4]: - // b is too long, a has data, and the first several characters are the same - elidedPrefix = "..." - a = a[2:] - b = b[2:] - - case len(a) > max: - a = a[:max] - elidedASuffix = "..." - - case len(b) > max: - b = b[:max] - elidedBSuffix = "..." - - default: - // both are short enough - return elidedPrefix + a + elidedASuffix, elidedPrefix + b + elidedBSuffix - } - } -} - -func public(s string) bool { - if len(s) == 0 { - return false - } - return s[:1] == strings.ToUpper(s[:1]) -} - -type diff struct { - path *field.Path - a, b interface{} -} - -type orderedDiffs []diff - -func (d orderedDiffs) Len() int { return len(d) } -func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d orderedDiffs) Less(i, j int) bool { - a, b := d[i].path.String(), d[j].path.String() - if a < b { - return true - } - return false -} - -func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { - switch a.Type().Kind() { - case reflect.Struct: - var changes []diff - for i := 0; i < a.Type().NumField(); i++ { - if !public(a.Type().Field(i).Name) { - if reflect.DeepEqual(a.Interface(), b.Interface()) { - continue - } - return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} - } - if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { - changes = append(changes, sub...) - } - } - return changes - case reflect.Ptr, reflect.Interface: - if a.IsNil() || b.IsNil() { - switch { - case a.IsNil() && b.IsNil(): - return nil - case a.IsNil(): - return []diff{{path: path, a: nil, b: b.Interface()}} - default: - return []diff{{path: path, a: a.Interface(), b: nil}} - } - } - return objectReflectDiff(path, a.Elem(), b.Elem()) - case reflect.Chan: - if !reflect.DeepEqual(a.Interface(), b.Interface()) { - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } - return nil - case reflect.Slice: - lA, lB := a.Len(), b.Len() - l := lA - if lB < lA { - l = lB - } - if lA == lB && lA == 0 { - if a.IsNil() != b.IsNil() { - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } - return nil - } - var diffs []diff - for i := 0; i < l; i++ { - if !reflect.DeepEqual(a.Index(i), b.Index(i)) { - diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...) 
- } - } - for i := l; i < lA; i++ { - diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) - } - for i := l; i < lB; i++ { - diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) - } - return diffs - case reflect.Map: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return nil - } - aKeys := make(map[interface{}]interface{}) - for _, key := range a.MapKeys() { - aKeys[key.Interface()] = a.MapIndex(key).Interface() - } - var missing []diff - for _, key := range b.MapKeys() { - if _, ok := aKeys[key.Interface()]; ok { - delete(aKeys, key.Interface()) - if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { - continue - } - missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...) - continue - } - missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) - } - for key, value := range aKeys { - missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) - } - if len(missing) == 0 { - missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()}) - } - sort.Sort(orderedDiffs(missing)) - return missing - default: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return nil - } - if !a.CanInterface() { - return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} - } - return []diff{{path: path, a: a.Interface(), b: b.Interface()}} - } + return legacyDiff(a, b) } // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, @@ -311,3 +117,41 @@ func ObjectGoPrintSideBySide(a, b interface{}) string { w.Flush() return buf.String() } + +// IgnoreUnset is an option that ignores fields that are unset on the right +// hand side of a comparison. This is useful in testing to assert that an +// object is a derivative. +func IgnoreUnset() cmp.Option { + return cmp.Options{ + // ignore unset fields in v2 + cmp.FilterPath(func(path cmp.Path) bool { + _, v2 := path.Last().Values() + switch v2.Kind() { + case reflect.Slice, reflect.Map: + if v2.IsNil() || v2.Len() == 0 { + return true + } + case reflect.String: + if v2.Len() == 0 { + return true + } + case reflect.Interface, reflect.Ptr: + if v2.IsNil() { + return true + } + } + return false + }, cmp.Ignore()), + // ignore map entries that aren't set in v2 + cmp.FilterPath(func(path cmp.Path) bool { + switch i := path.Last().(type) { + case cmp.MapIndex: + if _, v2 := i.Values(); !v2.IsValid() { + fmt.Println("E") + return true + } + } + return false + }, cmp.Ignore()), + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 88e93767..62a73f34 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -19,6 +19,8 @@ package errors import ( "errors" "fmt" + + "k8s.io/apimachinery/pkg/util/sets" ) // MessageCountMap contains occurrence for each error message. 
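Returning to the diff package rewrite above: ObjectDiff, ObjectGoPrintDiff and ObjectReflectDiff now all delegate to github.com/google/go-cmp, and the new IgnoreUnset option lets callers treat an object with unset fields as a derivative of a fully populated one. An illustrative sketch (the Config type is hypothetical, used only for this example):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"k8s.io/apimachinery/pkg/util/diff"
)

// Config is a hypothetical type used only for illustration.
type Config struct {
	Name   string
	Labels map[string]string
}

func main() {
	full := Config{Name: "demo", Labels: map[string]string{"tier": "backend"}}
	partial := Config{Name: "demo"} // Labels deliberately left unset

	// Fields that are unset on the right-hand side are ignored.
	fmt.Println(cmp.Diff(full, partial, diff.IgnoreUnset()) == "") // true

	// Without the option, the nil Labels map is reported as a difference.
	fmt.Println(cmp.Diff(full, partial) == "") // false
}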
@@ -67,12 +69,38 @@ func (agg aggregate) Error() string { if len(agg) == 1 { return agg[0].Error() } - result := fmt.Sprintf("[%s", agg[0].Error()) - for i := 1; i < len(agg); i++ { - result += fmt.Sprintf(", %s", agg[i].Error()) + seenerrs := sets.NewString() + result := "" + agg.visit(func(err error) { + msg := err.Error() + if seenerrs.Has(msg) { + return + } + seenerrs.Insert(msg) + if len(seenerrs) > 1 { + result += ", " + } + result += msg + }) + if len(seenerrs) == 1 { + return result + } + return "[" + result + "]" +} + +func (agg aggregate) visit(f func(err error)) { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + err.visit(f) + case Aggregate: + for _, nestedErr := range err.Errors() { + f(nestedErr) + } + default: + f(err) + } } - result += "]" - return result } // Errors is part of the Aggregate interface. diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 5c2ac4f2..64cbc770 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -14,26 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -// DO NOT EDIT! -/* - Package intstr is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto - - It has these top-level messages: - IntOrString -*/ package intstr -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import io "io" + io "io" + math "math" + math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -46,17 +40,69 @@ var _ = math.Inf // proto package needs to be updated. 
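As an aside on the util/errors change at the top of this hunk: aggregate.Error() now flattens nested aggregates and de-duplicates identical messages, and a single unique message is rendered without brackets. A small sketch (illustrative only):

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	agg := utilerrors.NewAggregate([]error{
		errors.New("timed out"),
		errors.New("timed out"), // duplicate message, reported once
		errors.New("not found"),
	})
	fmt.Println(agg.Error()) // [timed out, not found]

	single := utilerrors.NewAggregate([]error{errors.New("timed out")})
	fmt.Println(single.Error()) // timed out
}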
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { + return fileDescriptor_94e046ae3ce6121c, []int{0} +} +func (m *IntOrString) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IntOrString) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntOrString.Merge(m, src) +} +func (m *IntOrString) XXX_Size() int { + return m.Size() +} +func (m *IntOrString) XXX_DiscardUnknown() { + xxx_messageInfo_IntOrString.DiscardUnknown(m) +} + +var xxx_messageInfo_IntOrString proto.InternalMessageInfo func init() { proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) +} + +var fileDescriptor_94e046ae3ce6121c = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} + func (m *IntOrString) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -64,51 +110,44 @@ func (m *IntOrString) Marshal() (dAtA []byte, err error) { } func (m *IntOrString) MarshalTo(dAtA 
[]byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) - dAtA[i] = 0x1a - i++ + i -= len(m.StrVal) + copy(dAtA[i:], m.StrVal) i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) - i += copy(dAtA[i:], m.StrVal) - return i, nil + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IntOrString) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Type)) @@ -119,14 +158,7 @@ func (m *IntOrString) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -146,7 +178,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -174,7 +206,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= (Type(b) & 0x7F) << shift + m.Type |= Type(b&0x7F) << shift if b < 0x80 { break } @@ -193,7 +225,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IntVal |= (int32(b) & 0x7F) << shift + m.IntVal |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -212,7 +244,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -222,6 +254,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -236,6 +271,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -302,10 +340,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: 
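The regenerated marshaling code above also replaces the loop in sovGenerated with a math/bits formula. A standalone check of that formula (not part of the generated file): a protobuf varint carries 7 payload bits per byte, so the encoded size is ceil(bitlen(x)/7), with x|1 making zero occupy one byte.

package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors the new sovGenerated helper.
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 127, 128, 16383, 16384} {
		fmt.Printf("%d -> %d byte(s)\n", x, varintSize(x))
	}
	// 0 -> 1, 127 -> 1, 128 -> 2, 16383 -> 2, 16384 -> 3
}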
for { @@ -334,6 +375,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -352,30 +396,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, - 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, - 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, - 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, - 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, - 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, - 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, - 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, - 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, - 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, - 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, - 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, - 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, - 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, - 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, - 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, - 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, - 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, - 0x64, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index 1c3ec732..e79fb9e5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -29,7 +29,7 @@ option go_package = "intstr"; // inner type. This allows you to have, for example, a JSON field that can // accept a name or number. // TODO: Rename to Int32OrString -// +// // +protobuf=true // +protobuf.options.(gogoproto.goproto_stringer)=false // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 642b83ce..2df62955 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,8 +25,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "github.com/google/gofuzz" + "k8s.io/klog" ) // IntOrString is a type that can hold an int32 or a string. 
When used in @@ -45,7 +45,7 @@ type IntOrString struct { } // Type represents the stored type of IntOrString. -type Type int +type Type int64 const ( Int Type = iota // The IntOrString holds an int. @@ -58,7 +58,7 @@ const ( // TODO: convert to (val int32) func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { - glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) } return IntOrString{Type: Int, IntVal: int32(val)} } @@ -122,11 +122,11 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { // the OpenAPI spec of this type. // // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } +func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. -func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } +func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 10c8cb83..0e2e3017 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -19,6 +19,7 @@ package json import ( "bytes" "encoding/json" + "fmt" "io" ) @@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + // Unmarshal unmarshals the given data // If v is a *map[string]interface{}, numbers are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { @@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v) + return convertMapNumbers(*v, 0) case *[]interface{}: // Build a decoder from the given data @@ -60,7 +64,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v) + return convertSliceNumbers(*v, 0) default: return json.Unmarshal(data, v) @@ -69,16 +73,20 @@ func Unmarshal(data []byte, v interface{}) error { // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}) error { +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for k, v := range m { switch v := v.(type) { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err @@ -89,16 +97,20 @@ func convertMapNumbers(m map[string]interface{}) error { // convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. 
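For context on the intstr changes above (Type widened to int64, overflow warnings routed through klog): IntOrString marshals as either a bare number or a JSON string depending on which variant it holds. A brief sketch using the package's existing FromInt/FromString constructors (illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	byPort := intstr.FromInt(8080)
	byName := intstr.FromString("http")

	p, _ := json.Marshal(byPort)
	n, _ := json.Marshal(byName)
	fmt.Println(string(p), string(n)) // 8080 "http"

	var parsed intstr.IntOrString
	_ = json.Unmarshal([]byte(`"https"`), &parsed)
	fmt.Println(parsed.Type == intstr.String, parsed.StrVal) // true https
}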
// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}) error { +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for i, v := range s { switch v := v.(type) { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS index 8e8d9fce..3f72c69b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - pwittrock reviewers: diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index d09a939b..990fa0d4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -21,7 +21,7 @@ import ( "reflect" "github.com/davecgh/go-spew/spew" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) // PreconditionFunc asserts that an incompatible change is not present within a patch. diff --git a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go index 2965d5a8..d69bf32c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go +++ b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go @@ -82,7 +82,7 @@ var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[ func extractStackCreator() (string, int, bool) { stack := debug.Stack() matches := stackCreator.FindStringSubmatch(string(stack)) - if matches == nil || len(matches) != 4 { + if len(matches) != 4 { return "", 0, false } line, err := strconv.Atoi(matches[3]) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 8abbdea8..f9540c63 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -31,8 +31,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "golang.org/x/net/http2" + "k8s.io/klog" ) // JoinPreservingTrailingSlash does a path.Join of the specified elements, @@ -68,14 +68,17 @@ func IsProbableEOF(err error) bool { if uerr, ok := err.(*url.Error); ok { err = uerr.Err } + msg := err.Error() switch { case err == io.EOF: return true - case err.Error() == "http: can't write HTTP request on broken connection": + case msg == "http: can't write HTTP request on broken connection": return true - case strings.Contains(err.Error(), "connection reset by peer"): + case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"): return true - case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): + case strings.Contains(msg, "connection reset by peer"): + return true + case strings.Contains(strings.ToLower(msg), "use of closed network connection"): return true } return false @@ -98,6 +101,9 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport { if t.TLSHandshakeTimeout == 0 { t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout } + if t.IdleConnTimeout == 0 { + 
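The util/json changes above cap recursion at maxDepth while preserving the package's number handling: values that fit in an int64 stay integers instead of becoming float64 as with plain encoding/json. A small comparison sketch (illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	data := []byte(`{"replicas": 3, "ratio": 0.5}`)

	var std map[string]interface{}
	_ = json.Unmarshal(data, &std)
	fmt.Printf("%T %T\n", std["replicas"], std["ratio"]) // float64 float64

	var converted map[string]interface{}
	_ = utiljson.Unmarshal(data, &converted)
	fmt.Printf("%T %T\n", converted["replicas"], converted["ratio"]) // int64 float64
}

Documents nested more than 10000 levels deep now return an error instead of risking a stack overflow.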
t.IdleConnTimeout = defaultTransport.IdleConnTimeout + } return t } @@ -107,15 +113,30 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { t = SetOldTransportDefaults(t) // Allow clients to disable http2 if needed. if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { - glog.Infof("HTTP2 has been explicitly disabled") - } else { + klog.Infof("HTTP2 has been explicitly disabled") + } else if allowsHTTP2(t) { if err := http2.ConfigureTransport(t); err != nil { - glog.Warningf("Transport failed http2 configuration: %v", err) + klog.Warningf("Transport failed http2 configuration: %v", err) } } return t } +func allowsHTTP2(t *http.Transport) bool { + if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { + // the transport expressed no NextProto preference, allow + return true + } + for _, p := range t.TLSClientConfig.NextProtos { + if p == http2.NextProtoTLS { + // the transport explicitly allowed http/2 + return true + } + } + // the transport explicitly set NextProtos and excluded http/2 + return false +} + type RoundTripperWrapper interface { http.RoundTripper WrappedRoundTripper() http.RoundTripper @@ -321,9 +342,10 @@ type Dialer interface { // ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to // originalLocation). It returns the opened net.Conn and the raw response bytes. -func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) { +// If requireSameHostRedirects is true, only redirects to the same host are permitted. +func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) { const ( - maxRedirects = 10 + maxRedirects = 9 // Fail on the 10th redirect maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers ) @@ -367,7 +389,7 @@ redirectLoop: resp, err := http.ReadResponse(respReader, nil) if err != nil { // Unable to read the backend response; let the client handle it. - glog.Warningf("Error reading backend response: %v", err) + klog.Warningf("Error reading backend response: %v", err) break redirectLoop } @@ -387,10 +409,6 @@ redirectLoop: resp.Body.Close() // not used - // Reset the connection. - intermediateConn.Close() - intermediateConn = nil - // Prepare to follow the redirect. redirectStr := resp.Header.Get("Location") if redirectStr == "" { @@ -404,6 +422,15 @@ redirectLoop: if err != nil { return nil, nil, fmt.Errorf("malformed Location header: %v", err) } + + // Only follow redirects to the same host. Otherwise, propagate the redirect response back. + if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { + break redirectLoop + } + + // Reset the connection. 
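On the transport changes above: SetTransportDefaults still upgrades clients to HTTP/2 unless DISABLE_HTTP2 is set, but the new allowsHTTP2 check leaves transports alone when their TLS NextProtos list explicitly excludes "h2". A sketch of the two cases (illustrative; assumes DISABLE_HTTP2 is unset):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// No NextProtos preference: the transport is configured for HTTP/2,
	// which populates TLSClientConfig and adds "h2" to NextProtos.
	plain := utilnet.SetTransportDefaults(&http.Transport{})
	fmt.Println(plain.TLSClientConfig.NextProtos) // contains "h2"

	// NextProtos limited to HTTP/1.1: left untouched.
	h1 := utilnet.SetTransportDefaults(&http.Transport{
		TLSClientConfig: &tls.Config{NextProtos: []string{"http/1.1"}},
	})
	fmt.Println(h1.TLSClientConfig.NextProtos) // [http/1.1]
}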
+ intermediateConn.Close() + intermediateConn = nil } connToReturn := intermediateConn diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index 0ab9b360..836494d5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -26,7 +26,7 @@ import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" ) type AddressFamily uint @@ -36,6 +36,18 @@ const ( familyIPv6 AddressFamily = 6 ) +type AddressFamilyPreference []AddressFamily + +var ( + preferIPv4 = AddressFamilyPreference{familyIPv4, familyIPv6} + preferIPv6 = AddressFamilyPreference{familyIPv6, familyIPv4} +) + +const ( + // LoopbackInterfaceName is the default name of the loopback interface + LoopbackInterfaceName = "lo" +) + const ( ipv4RouteFile = "/proc/net/route" ipv6RouteFile = "/proc/net/ipv6_route" @@ -53,7 +65,7 @@ type RouteFile struct { parse func(input io.Reader) ([]Route, error) } -// noRoutesError can be returned by ChooseBindAddress() in case of no routes +// noRoutesError can be returned in case of no routes type noRoutesError struct { message string } @@ -193,7 +205,7 @@ func isInterfaceUp(intf *net.Interface) bool { return false } if intf.Flags&net.FlagUp != 0 { - glog.V(4).Infof("Interface %v is up", intf.Name) + klog.V(4).Infof("Interface %v is up", intf.Name) return true } return false @@ -208,20 +220,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool { func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { - glog.V(4).Infof("Checking addr %s.", addrs[i].String()) + klog.V(4).Infof("Checking addr %s.", addrs[i].String()) ip, _, err := net.ParseCIDR(addrs[i].String()) if err != nil { return nil, err } if memberOf(ip, family) { if ip.IsGlobalUnicast() { - glog.V(4).Infof("IP found %v", ip) + klog.V(4).Infof("IP found %v", ip) return ip, nil } else { - glog.V(4).Infof("Non-global unicast address found %v", ip) + klog.V(4).Infof("Non-global unicast address found %v", ip) } } else { - glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) + klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -241,20 +253,20 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte if err != nil { return nil, err } - glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) matchingIP, err := getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } if matchingIP != nil { - glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) return matchingIP, nil } } return nil, nil } -// memberOF tells if the IP is of the desired family. Used for checking interface addresses. +// memberOf tells if the IP is of the desired family. Used for checking interface addresses. func memberOf(ip net.IP, family AddressFamily) bool { if ip.To4() != nil { return family == familyIPv4 @@ -265,8 +277,8 @@ func memberOf(ip net.IP, family AddressFamily) bool { // chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that // has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. -// Searches for IPv4 addresses, and then IPv6 addresses. 
-func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { +// addressFamilies determines whether it prefers IPv4 or IPv6 +func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { intfs, err := nw.Interfaces() if err != nil { return nil, err @@ -274,15 +286,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { if len(intfs) == 0 { return nil, fmt.Errorf("no interfaces found on host.") } - for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + for _, family := range addressFamilies { + klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) for _, intf := range intfs { if !isInterfaceUp(&intf) { - glog.V(4).Infof("Skipping: down interface %q", intf.Name) + klog.V(4).Infof("Skipping: down interface %q", intf.Name) continue } if isLoopbackOrPointToPoint(&intf) { - glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) continue } addrs, err := nw.Addrs(&intf) @@ -290,7 +302,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, err } if len(addrs) == 0 { - glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) continue } for _, addr := range addrs { @@ -299,15 +311,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } if !memberOf(ip, family) { - glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) continue } // TODO: Decide if should open up to allow IPv6 LLAs in future. if !ip.IsGlobalUnicast() { - glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) continue } - glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) return ip, nil } } @@ -321,15 +333,19 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { // IP of the interface with a gateway on it (with priority given to IPv4). For a node // with no internet connection, it returns error. func ChooseHostInterface() (net.IP, error) { + return chooseHostInterface(preferIPv4) +} + +func chooseHostInterface(addressFamilies AddressFamilyPreference) (net.IP, error) { var nw networkInterfacer = networkInterface{} if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { - return chooseIPFromHostInterfaces(nw) + return chooseIPFromHostInterfaces(nw, addressFamilies) } routes, err := getAllDefaultRoutes() if err != nil { return nil, err } - return chooseHostInterfaceFromRoute(routes, nw) + return chooseHostInterfaceFromRoute(routes, nw, addressFamilies) } // networkInterfacer defines an interface for several net library functions. Production @@ -377,36 +393,43 @@ func getAllDefaultRoutes() ([]Route, error) { } // chooseHostInterfaceFromRoute cycles through each default route provided, looking for a -// global IP address from the interface for the route. 
Will first look all each IPv4 route for -// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. -func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { - for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) +// global IP address from the interface for the route. addressFamilies determines whether it +// prefers IPv4 or IPv6 +func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { + for _, family := range addressFamilies { + klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) for _, route := range routes { if route.Family != family { continue } - glog.V(4).Infof("Default route transits interface %q", route.Interface) + klog.V(4).Infof("Default route transits interface %q", route.Interface) finalIP, err := getIPFromInterface(route.Interface, family, nw) if err != nil { return nil, err } if finalIP != nil { - glog.V(4).Infof("Found active IP %v ", finalIP) + klog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - glog.V(4).Infof("No active IP found by looking at default routes") + klog.V(4).Infof("No active IP found by looking at default routes") return nil, fmt.Errorf("unable to select an IP from default routes.") } -// If bind-address is usable, return it directly -// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default -// interface. -func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { +// ResolveBindAddress returns the IP address of a daemon, based on the given bindAddress: +// If bindAddress is unset, it returns the host's default IP, as with ChooseHostInterface(). +// If bindAddress is unspecified or loopback, it returns the default IP of the same +// address family as bindAddress. +// Otherwise, it just returns bindAddress. +func ResolveBindAddress(bindAddress net.IP) (net.IP, error) { + addressFamilies := preferIPv4 + if bindAddress != nil && memberOf(bindAddress, familyIPv6) { + addressFamilies = preferIPv6 + } + if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { - hostIP, err := ChooseHostInterface() + hostIP, err := chooseHostInterface(addressFamilies) if err != nil { return nil, err } @@ -414,3 +437,21 @@ func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { } return bindAddress, nil } + +// ChooseBindAddressForInterface choose a global IP for a specific interface, with priority given to IPv4. +// This is required in case of network setups where default routes are present, but network +// interfaces use only link-local addresses (e.g. as described in RFC5549). +// e.g when using BGP to announce a host IP over link-local ip addresses and this ip address is attached to the lo interface. 
+func ChooseBindAddressForInterface(intfName string) (net.IP, error) { + var nw networkInterfacer = networkInterface{} + for _, family := range preferIPv4 { + ip, err := getIPFromInterface(intfName, family, nw) + if err != nil { + return nil, err + } + if ip != nil { + return ip, nil + } + } + return nil, fmt.Errorf("unable to select an IP from %s network interface", intfName) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 8344d10c..2e7cb949 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -54,3 +54,20 @@ func IsConnectionReset(err error) bool { } return false } + +// Returns if the given err is "connection refused" error +func IsConnectionRefused(err error) bool { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { + return true + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index da32fe12..1428443f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -18,11 +18,12 @@ package runtime import ( "fmt" + "net/http" "runtime" "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -40,11 +41,7 @@ var PanicHandlers = []func(interface{}){logPanic} // called in case of panic. HandleCrash actually crashes, after calling the // handlers and logging the panic message. // -// TODO: remove this function. We are switching to a world where it's safe for -// apiserver to panic, since it will be restarted by kubelet. At the beginning -// of the Kubernetes project, nothing was going to restart apiserver and so -// catching panics was important. But it's actually much simpler for monitoring -// software if we just exit when an unexpected panic happens. +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { for _, fn := range PanicHandlers { @@ -60,23 +57,26 @@ func HandleCrash(additionalHandlers ...func(interface{})) { } } -// logPanic logs the caller tree when a panic occurs. +// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). func logPanic(r interface{}) { - callers := getCallers(r) - glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) -} - -func getCallers(r interface{}) string { - callers := "" - for i := 0; true; i++ { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - callers = callers + fmt.Sprintf("%v:%v\n", file, line) + if r == http.ErrAbortHandler { + // honor the http.ErrAbortHandler sentinel panic value: + // ErrAbortHandler is a sentinel panic value to abort a handler. + // While any panic from ServeHTTP aborts the response to the client, + // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. + return } - return callers + // Same as stdlib http server code. 
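The new IsConnectionRefused helper above unwraps *url.Error, *net.OpError and *os.SyscallError down to syscall.ECONNREFUSED. A sketch (illustrative; assumes a Unix-like platform and that nothing is listening on the chosen port):

package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// Port 1 is assumed to have no listener, so the dial is refused.
	_, err := http.Get("http://127.0.0.1:1")
	fmt.Println(utilnet.IsConnectionRefused(err)) // true when refused, false for other failures
}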
Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + if _, ok := r.(string); ok { + klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + } else { + klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + } } // ErrorHandlers is a list of functions which will be invoked when an unreturnable @@ -111,7 +111,7 @@ func HandleError(err error) { // logError prints an error with the call stack of the location it was reported func logError(err error) { - glog.ErrorDepth(2, err) + klog.ErrorDepth(2, err) } type rudimentaryErrorBackoff struct { @@ -151,13 +151,17 @@ func GetCaller() string { // handlers to handle errors and panics the same way. func RecoverFromPanic(err *error) { if r := recover(); r != nil { - callers := getCallers(r) + // Same as stdlib http server code. Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] *err = fmt.Errorf( - "recovered from panic %q. (err=%v) Call stack:\n%v", + "recovered from panic %q. (err=%v) Call stack:\n%s", r, *err, - callers) + stacktrace) } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 766f4501..9bfa85d4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -46,17 +46,19 @@ func ByteKeySet(theMap interface{}) Byte { } // Insert adds items to the set. -func (s Byte) Insert(items ...byte) { +func (s Byte) Insert(items ...byte) Byte { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Byte) Delete(items ...byte) { +func (s Byte) Delete(items ...byte) Byte { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index a0a513cd..88bd7096 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -46,17 +46,19 @@ func IntKeySet(theMap interface{}) Int { } // Insert adds items to the set. -func (s Int) Insert(items ...int) { +func (s Int) Insert(items ...int) Int { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int) Delete(items ...int) { +func (s Int) Delete(items ...int) Int { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go new file mode 100644 index 00000000..96a48555 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
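On the util/runtime changes above: panic logging now uses a bounded runtime.Stack buffer and skips http.ErrAbortHandler, and RecoverFromPanic embeds the same bounded stack in the error it produces. A sketch of converting a panic into an error with RecoverFromPanic (illustrative only; HandleCrash, by contrast, logs and then re-panics by default):

package main

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func doWork() (err error) {
	// RecoverFromPanic calls recover() itself, so it must be deferred directly.
	defer utilruntime.RecoverFromPanic(&err)
	panic("illustrative panic")
}

func main() {
	err := doWork()
	fmt.Println(err != nil) // true: the panic became an error carrying the stack trace
}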
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) Int32 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) Int32 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. 
+// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index 9ca9af0c..b375a1b0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -46,17 +46,19 @@ func Int64KeySet(theMap interface{}) Int64 { } // Insert adds items to the set. -func (s Int64) Insert(items ...int64) { +func (s Int64) Insert(items ...int64) Int64 { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int64) Delete(items ...int64) { +func (s Int64) Delete(items ...int64) Int64 { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index ba00ad7d..e6f37db8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -46,17 +46,19 @@ func StringKeySet(theMap interface{}) String { } // Insert adds items to the set. -func (s String) Insert(items ...string) { +func (s String) Insert(items ...string) String { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s String) Delete(items ...string) { +func (s String) Delete(items ...string) String { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. 
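With the sets changes above, Insert and Delete return the receiver, so sets can be built fluently, and the generated Int32 set joins the existing Byte/Int/Int64/String variants. A short sketch (illustrative only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Chained construction now works because Insert/Delete return the set.
	s := sets.NewString("a").Insert("b", "c").Delete("a")
	fmt.Println(s.List()) // [b c]

	ports := sets.NewInt32(443, 80, 8080)
	fmt.Println(ports.Has(80), ports.List()) // true [80 443 8080]
}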
diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS index dbbe0de4..cfee199f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - pwittrock - mengqiy diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index ddf99817..c55894e5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1014,7 +1014,7 @@ func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey setOrderIndex++ } // If patchIndex is inbound but setOrderIndex if out of bound mean there are items mismatching between the patch list and setElementOrder list. - // the second check is is a sanity check, and should always be true if the first is true. + // the second check is a sanity check, and should always be true if the first is true. if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) { return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go index bf478223..1fa351aa 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go +++ b/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go @@ -17,27 +17,11 @@ limitations under the License. package uuid import ( - "sync" - - "github.com/pborman/uuid" + "github.com/google/uuid" "k8s.io/apimachinery/pkg/types" ) -var uuidLock sync.Mutex -var lastUUID uuid.UUID - func NewUUID() types.UID { - uuidLock.Lock() - defer uuidLock.Unlock() - result := uuid.NewUUID() - // The UUID package is naive and can generate identical UUIDs if the - // time interval is quick enough. - // The UUID uses 100 ns increments so it's short enough to actively - // wait for a new value. - for uuid.Equal(lastUUID, result) == true { - result = uuid.NewUUID() - } - lastUUID = result - return types.UID(result.String()) + return types.UID(uuid.New().String()) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 4767fd1d..0cd5d657 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -116,6 +116,10 @@ const ( // This is similar to ErrorTypeInvalid, but the error will not include the // too-long value. See TooLong(). ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + ErrorTypeTooMany ErrorType = "FieldValueTooMany" // ErrorTypeInternal is used to report other errors that are not related // to user input. See InternalError(). 
ErrorTypeInternal ErrorType = "InternalError" @@ -138,6 +142,8 @@ func (t ErrorType) String() string { return "Forbidden" case ErrorTypeTooLong: return "Too long" + case ErrorTypeTooMany: + return "Too many" case ErrorTypeInternal: return "Internal error" default: @@ -198,7 +204,14 @@ func Forbidden(field *Path, detail string) *Error { // Invalid, but the returned error will not include the too-long // value. func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} +} + +// TooMany returns a *Error indicating "too many". This is used to +// report that a given list has too many items. This is similar to TooLong, +// but the returned error indicates quantity instead of length. +func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { + return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} } // InternalError returns a *Error indicating "internal error". This is used diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index e0d17154..8e1907c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -70,7 +70,11 @@ func IsQualifiedName(value string) []string { return errs } -// IsFullyQualifiedName checks if the name is fully qualified. +// IsFullyQualifiedName checks if the name is fully qualified. This is similar +// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of +// 2 and does not accept a trailing . as valid. +// TODO: This function is deprecated and preserved until all callers migrate to +// IsFullyQualifiedDomainName; please don't add new callers. func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { var allErrors field.ErrorList if len(name) == 0 { @@ -85,8 +89,30 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { return allErrors } +// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This +// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments +// instead of 3 and accepts a trailing . as valid. +func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if strings.HasSuffix(name, ".") { + name = name[:len(name)-1] + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 2 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) + } + return allErrors +} + const labelValueFmt string = "(" + qualifiedNameFmt + ")?" 
const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" + +// LabelValueMaxLength is a label's max length const LabelValueMaxLength int = 63 var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") @@ -107,6 +133,8 @@ func IsValidLabelValue(value string) []string { const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" + +// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) const DNS1123LabelMaxLength int = 63 var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") @@ -126,6 +154,8 @@ func IsDNS1123Label(value string) []string { const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") @@ -145,6 +175,8 @@ func IsDNS1123Subdomain(value string) []string { const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" + +// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) const DNS1035LabelMaxLength int = 63 var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") @@ -277,11 +309,32 @@ func IsValidIP(value string) []string { return nil } +// IsValidIPv4Address tests that the argument is a valid IPv4 address. +func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() == nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) + } + return allErrors +} + +// IsValidIPv6Address tests that the argument is a valid IPv6 address. +func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) + } + return allErrors +} + const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") +// IsValidPercent checks that string is in the form of a percentage func IsValidPercent(percent string) []string { if !percentRegexp.MatchString(percent) { return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} @@ -391,13 +444,13 @@ func hasChDirPrefix(value string) []string { return errs } -// IsSocketAddr checks that a string conforms is a valid socket address +// IsValidSocketAddr checks that string represents a valid socket address // as defined in RFC 789. 
(e.g 0.0.0.0:10254 or [::]:10254)) func IsValidSocketAddr(value string) []string { var errs []string ip, port, err := net.SplitHostPort(value) if err != nil { - return append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") + errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") return errs } portInt, _ := strconv.Atoi(port) diff --git a/vendor/k8s.io/kubernetes/pkg/util/version/doc.go b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/util/version/doc.go rename to vendor/k8s.io/apimachinery/pkg/util/version/doc.go index ebe43152..5b2b22b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package version provides utilities for version number comparisons -package version // import "k8s.io/kubernetes/pkg/util/version" +package version // import "k8s.io/apimachinery/pkg/util/version" diff --git a/vendor/k8s.io/kubernetes/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/util/version/version.go rename to vendor/k8s.io/apimachinery/pkg/util/version/version.go index 24724e50..8946926a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -154,6 +154,43 @@ func (v *Version) Components() []uint { return v.components } +// WithMajor returns copy of the version object with requested major number +func (v *Version) WithMajor(major uint) *Version { + result := *v + result.components = []uint{major, v.Minor(), v.Patch()} + return &result +} + +// WithMinor returns copy of the version object with requested minor number +func (v *Version) WithMinor(minor uint) *Version { + result := *v + result.components = []uint{v.Major(), minor, v.Patch()} + return &result +} + +// WithPatch returns copy of the version object with requested patch number +func (v *Version) WithPatch(patch uint) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), patch} + return &result +} + +// WithPreRelease returns copy of the version object with requested prerelease +func (v *Version) WithPreRelease(preRelease string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.preRelease = preRelease + return &result +} + +// WithBuildMetadata returns copy of the version object with requested buildMetadata +func (v *Version) WithBuildMetadata(buildMetadata string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.buildMetadata = buildMetadata + return &result +} + // String converts a Version back to a string; note that for versions parsed with // ParseGeneric, this will not include the trailing uninterpreted portion of the version // number. diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index ca61168c..386c3e7e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -88,6 +88,15 @@ func Until(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, true, stopCh) } +// UntilWithContext loops until context is done, running f every period. 
+// +// UntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor and with sliding = true (which means the timer +// for period starts after the f completes). +func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, true) +} + // NonSlidingUntil loops until stop channel is closed, running f every // period. // @@ -98,6 +107,16 @@ func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, false, stopCh) } +// NonSlidingUntilWithContext loops until context is done, running f every +// period. +// +// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor, with sliding = false (meaning the timer for period +// starts at the same time as the function starts). +func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, false) +} + // JitterUntil loops until stop channel is closed, running f every period. // // If jitterFactor is positive, the period is jittered before every run of f. @@ -151,6 +170,19 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b } } +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already expired. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) +} + // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. // @@ -173,36 +205,97 @@ type ConditionFunc func() (done bool, err error) // Backoff holds parameters applied to a Backoff function. type Backoff struct { - Duration time.Duration // the base duration - Factor float64 // Duration is multiplied by factor each iteration - Jitter float64 // The amount of jitter applied each iteration - Steps int // Exit with error after this many steps + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. + Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. 
+ Cap time.Duration } -// ExponentialBackoff repeats a condition check with exponential backoff. +// Step (1) returns an amount of time to sleep determined by the +// original Duration and Jitter and (2) mutates the provided Backoff +// to update its Steps and Duration. +func (b *Backoff) Step() time.Duration { + if b.Steps < 1 { + if b.Jitter > 0 { + return Jitter(b.Duration, b.Jitter) + } + return b.Duration + } + b.Steps-- + + duration := b.Duration + + // calculate the next step + if b.Factor != 0 { + b.Duration = time.Duration(float64(b.Duration) * b.Factor) + if b.Cap > 0 && b.Duration > b.Cap { + b.Duration = b.Cap + b.Steps = 0 + } + } + + if b.Jitter > 0 { + duration = Jitter(duration, b.Jitter) + } + return duration +} + +// contextForChannel derives a child context from a parent channel. // -// It checks the condition up to Steps times, increasing the wait by multiplying -// the previous duration by Factor. +// The derived context's Done channel is closed when the returned cancel function +// is called or when the parent channel is closed, whichever happens first. // -// If Jitter is greater than zero, a random amount of each duration is added -// (between duration and duration*(1+jitter)). +// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. +func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + select { + case <-parentCh: + cancel() + case <-ctx.Done(): + } + }() + return ctx, cancel +} + +// ExponentialBackoff repeats a condition check with exponential backoff. // -// If the condition never returns true, ErrWaitTimeout is returned. All other -// errors terminate immediately. +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - duration := backoff.Duration - for i := 0; i < backoff.Steps; i++ { - if i != 0 { - adjusted := duration - if backoff.Jitter > 0.0 { - adjusted = Jitter(duration, backoff.Jitter) - } - time.Sleep(adjusted) - duration = time.Duration(float64(duration) * backoff.Factor) - } + for backoff.Steps > 0 { if ok, err := condition(); err != nil || ok { return err } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) } return ErrWaitTimeout } @@ -287,7 +380,9 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro // PollUntil always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - return WaitFor(poller(interval, 0), condition, stopCh) + ctx, cancel := contextForChannel(stopCh) + defer cancel() + return WaitFor(poller(interval, 0), condition, ctx.Done()) } // PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. @@ -317,36 +412,48 @@ type WaitFunc func(done <-chan struct{}) <-chan struct{} // WaitFor continually checks 'fn' as driven by 'wait'. 
// // WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. +// placed on the channel and once more when the channel is closed. If the channel is closed +// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. // -// If 'fn' returns an error the loop ends and that error is returned, and if +// If 'fn' returns an error the loop ends and that error is returned. If // 'fn' returns true the loop ends and nil is returned. // -// ErrWaitTimeout will be returned if the channel is closed without fn ever +// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever // returning true. +// +// When the done channel is closed, because the golang `select` statement is +// "uniform pseudo-random", the `fn` might still run one or multiple time, +// though eventually `WaitFor` will return. func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - c := wait(done) + stopCh := make(chan struct{}) + defer close(stopCh) + c := wait(stopCh) for { - _, open := <-c - ok, err := fn() - if err != nil { - return err - } - if ok { - return nil - } - if !open { - break + select { + case _, open := <-c: + ok, err := fn() + if err != nil { + return err + } + if ok { + return nil + } + if !open { + return ErrWaitTimeout + } + case <-done: + return ErrWaitTimeout } } - return ErrWaitTimeout } // poller returns a WaitFunc that will send to the channel every interval until // timeout has elapsed and then closes the channel. // // Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity. +// closed. A timeout of 0 is interpreted as an infinity, and in such a case +// it would be the caller's responsibility to close the done channel. +// Failure to do so would result in a leaked goroutine. // // Output ticks are not buffered. If the channel is not ready to receive an // item, the tick is skipped. 
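Editor's note (illustrative, not part of the patch): the wait.go hunks above add the Cap field and the Step method to wait.Backoff and rewrite ExponentialBackoff around Step. A minimal sketch of driving ExponentialBackoff with the documented Backoff fields, assuming the patched k8s.io/apimachinery is vendored:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // initial sleep between checks
		Factor:   2.0,                    // duration is multiplied by this each step
		Jitter:   0.1,                    // each sleep gets up to 10% random jitter
		Steps:    5,                      // at most 5 condition checks
		Cap:      2 * time.Second,        // duration never grows past this
	}

	attempts := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// Pretend the operation succeeds on the third try.
		return attempts >= 3, nil
	})
	if err == wait.ErrWaitTimeout {
		fmt.Println("condition never became true within", backoff.Steps, "steps")
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}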
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 3cd85515..a9a3853a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,8 +26,8 @@ import ( "strings" "unicode" - "github.com/ghodss/yaml" - "github.com/golang/glog" + "k8s.io/klog" + "sigs.k8s.io/yaml" ) // ToJSON converts a single YAML document into a JSON document @@ -217,11 +217,9 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if d.decoder == nil { buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) if isJSON { - glog.V(4).Infof("decoding stream as JSON") d.decoder = json.NewDecoder(buffer) d.rawData = origData } else { - glog.V(4).Infof("decoding stream as YAML") d.decoder = NewYAMLToJSONDecoder(buffer) } } @@ -230,7 +228,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if syntax, ok := err.(*json.SyntaxError); ok { data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) if readErr != nil { - glog.V(4).Infof("reading stream failed: %v", readErr) + klog.V(4).Infof("reading stream failed: %v", readErr) } js := string(data) diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go index 5e77af7e..29574fd6 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package version supplies the type for version information collected at build time. // +k8s:openapi-gen=true + +// Package version supplies the type for version information collected at build time. package version // import "k8s.io/apimachinery/pkg/version" diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 93bb1cdf..8af256eb 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -17,10 +17,12 @@ limitations under the License. package watch import ( + "fmt" "io" "sync" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -39,19 +41,28 @@ type Decoder interface { Close() } +// Reporter hides the details of how an error is turned into a runtime.Object for +// reporting on a watch stream since this package may not import a higher level report. +type Reporter interface { + // AsObject must convert err into a valid runtime.Object for the watch stream. + AsObject(err error) runtime.Object +} + // StreamWatcher turns any stream for which you can write a Decoder interface // into a watch.Interface. type StreamWatcher struct { sync.Mutex - source Decoder - result chan Event - stopped bool + source Decoder + reporter Reporter + result chan Event + stopped bool } // NewStreamWatcher creates a StreamWatcher from the given decoder. -func NewStreamWatcher(d Decoder) *StreamWatcher { +func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { sw := &StreamWatcher{ - source: d, + source: d, + reporter: r, // It's easy for a consumer to add buffering via an extra // goroutine/channel, but impossible for them to remove it, // so nonbuffered is better. 
@@ -100,13 +111,15 @@ func (sw *StreamWatcher) receive() { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: - msg := "Unable to decode an event from the watch stream: %v" if net.IsProbableEOF(err) { - glog.V(5).Infof(msg, err) + klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) } else { - glog.Errorf(msg, err) + sw.result <- Event{ + Type: Error, + Object: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)), + } } } return diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index a627d1d5..3945be3a 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" ) @@ -44,6 +44,7 @@ const ( Added EventType = "ADDED" Modified EventType = "MODIFIED" Deleted EventType = "DELETED" + Bookmark EventType = "BOOKMARK" Error EventType = "ERROR" DefaultChanSize int32 = 100 @@ -57,6 +58,10 @@ type Event struct { // Object is: // * If Type is Added or Modified: the new state of the object. // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Bookmark: the object (instance of a type being watched) where + // only ResourceVersion field is set. On successful restart of watch from a + // bookmark resourceVersion, client is guaranteed to not get repeat event + // nor miss any events. // * If Type is Error: *api.Status is recommended; other types may make sense // depending on context. Object runtime.Object @@ -106,7 +111,7 @@ func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") + klog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } @@ -173,7 +178,7 @@ func (f *RaceFreeFakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") + klog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS index 8e8d9fce..3f72c69b 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - pwittrock reviewers: diff --git a/vendor/k8s.io/client-go/discovery/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached_discovery.go deleted file mode 100644 index d38a0bbd..00000000 --- a/vendor/k8s.io/client-go/discovery/cached_discovery.go +++ /dev/null @@ -1,282 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package discovery - -import ( - "errors" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "sync" - "time" - - "github.com/golang/glog" - "github.com/googleapis/gnostic/OpenAPIv2" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" -) - -// CachedDiscoveryClient implements the functions that discovery server-supported API groups, -// versions and resources. -type CachedDiscoveryClient struct { - delegate DiscoveryInterface - - // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. - cacheDirectory string - - // ttl is how long the cache should be considered valid - ttl time.Duration - - // mutex protects the variables below - mutex sync.Mutex - - // ourFiles are all filenames of cache files created by this process - ourFiles map[string]struct{} - // invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called) - invalidated bool - // fresh is true if all used cache files were ours - fresh bool -} - -var _ CachedDiscoveryInterface = &CachedDiscoveryClient{} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { - filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. - if err == nil { - cachedResources := &metav1.APIResourceList{} - if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) - return cachedResources, nil - } - } - - liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) - return liveResources, err - } - if liveResources == nil || len(liveResources.APIResources) == 0 { - glog.V(3).Infof("skipped caching discovery info, no resources found") - return liveResources, err - } - - if err := d.writeCachedFile(filename, liveResources); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveResources, nil -} - -// ServerResources returns the supported resources for all groups and versions. -func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - return ServerResources(d) -} - -func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { - filename := filepath.Join(d.cacheDirectory, "servergroups.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
- if err == nil { - cachedGroups := &metav1.APIGroupList{} - if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) - return cachedGroups, nil - } - } - - liveGroups, err := d.delegate.ServerGroups() - if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) - return liveGroups, err - } - if liveGroups == nil || len(liveGroups.Groups) == 0 { - glog.V(3).Infof("skipped caching discovery info, no groups found") - return liveGroups, err - } - - if err := d.writeCachedFile(filename, liveGroups); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveGroups, nil -} - -func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { - // after invalidation ignore cache files not created by this process - d.mutex.Lock() - _, ourFile := d.ourFiles[filename] - if d.invalidated && !ourFile { - d.mutex.Unlock() - return nil, errors.New("cache invalidated") - } - d.mutex.Unlock() - - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - fileInfo, err := file.Stat() - if err != nil { - return nil, err - } - - if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { - return nil, errors.New("cache expired") - } - - // the cache is present and its valid. Try to read and use it. - cachedBytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - - d.mutex.Lock() - defer d.mutex.Unlock() - d.fresh = d.fresh && ourFile - - return cachedBytes, nil -} - -func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { - return err - } - - bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) - if err != nil { - return err - } - - f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".") - if err != nil { - return err - } - defer os.Remove(f.Name()) - _, err = f.Write(bytes) - if err != nil { - return err - } - - err = os.Chmod(f.Name(), 0755) - if err != nil { - return err - } - - name := f.Name() - err = f.Close() - if err != nil { - return err - } - - // atomic rename - d.mutex.Lock() - defer d.mutex.Unlock() - err = os.Rename(name, filename) - if err == nil { - d.ourFiles[filename] = struct{}{} - } - return err -} - -func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { - return d.delegate.RESTClient() -} - -func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredResources(d) -} - -func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredNamespacedResources(d) -} - -func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { - return d.delegate.ServerVersion() -} - -func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { - return d.delegate.OpenAPISchema() -} - -func (d *CachedDiscoveryClient) Fresh() bool { - d.mutex.Lock() - defer d.mutex.Unlock() - - return d.fresh -} - -func (d *CachedDiscoveryClient) Invalidate() { - d.mutex.Lock() - defer d.mutex.Unlock() - - d.ourFiles = map[string]struct{}{} - d.fresh = true - d.invalidated = true -} - -// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps -// the created client in a CachedDiscoveryClient. 
The provided configuration is updated with a -// custom transport that understands cache responses. -// We receive two distinct cache directories for now, in order to preserve old behavior -// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper, -// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing -// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not -// be updated with a roundtripper that understands cache responses. -// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory. -// TODO(juanvallejo): the value of "--cache-dir" should be honored. Consolidate discoveryCacheDir with httpCacheDir -// so that server resources and http-cache data are stored in the same location, provided via config flags. -func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) { - if len(httpCacheDir) > 0 { - // update the given restconfig with a custom roundtripper that - // understands how to handle cache responses. - wt := config.WrapTransport - config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - if wt != nil { - rt = wt(rt) - } - return newCacheRoundTripper(httpCacheDir, rt) - } - } - - discoveryClient, err := NewDiscoveryClientForConfig(config) - if err != nil { - return nil, err - } - - return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil -} - -// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. -func newCachedDiscoveryClient(delegate DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { - return &CachedDiscoveryClient{ - delegate: delegate, - cacheDirectory: cacheDirectory, - ttl: ttl, - ourFiles: map[string]struct{}{}, - fresh: true, - } -} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index a9660297..5d89457c 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - "github.com/googleapis/gnostic/OpenAPIv2" + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,6 +60,9 @@ type DiscoveryInterface interface { } // CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. +// Note that If the ServerResourcesForGroupVersion method returns a cache miss +// error, the user needs to explicitly call Invalidate to clear the cache, +// otherwise the same cache miss error will be returned next time. type CachedDiscoveryInterface interface { DiscoveryInterface // Fresh is supposed to tell the caller whether or not to retry if the cache @@ -68,7 +71,8 @@ type CachedDiscoveryInterface interface { // TODO: this needs to be revisited, this interface can't be locked properly // and doesn't make a lot of sense. Fresh() bool - // Invalidate enforces that no cached data is used in the future that is older than the current time. + // Invalidate enforces that no cached data that is older than the current time + // is used. 
Invalidate() } @@ -84,12 +88,28 @@ type ServerResourcesInterface interface { // ServerResourcesForGroupVersion returns the supported resources for a group and version. ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) // ServerResources returns the supported resources for all groups and versions. + // + // The returned resource list might be non-nil with partial results even in the case of + // non-nil error. + // + // Deprecated: use ServerGroupsAndResources instead. ServerResources() ([]*metav1.APIResourceList, error) + // ServerResources returns the supported groups and resources for all groups and versions. + // + // The returned group and resource lists might be non-nil with partial results even in the + // case of non-nil error. + ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) // ServerPreferredResources returns the supported resources with the version preferred by the // server. + // + // The returned group and resource lists might be non-nil with partial results even in the + // case of non-nil error. ServerPreferredResources() ([]*metav1.APIResourceList, error) // ServerPreferredNamespacedResources returns the supported namespaced resources with the // version preferred by the server. + // + // The returned resource list might be non-nil with partial results even in the case of + // non-nil error. ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) } @@ -187,14 +207,18 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r return resources, nil } -// serverResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) serverResources() ([]*metav1.APIResourceList, error) { - return ServerResources(d) -} - // ServerResources returns the supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - return withRetries(defaultRetries, d.serverResources) + _, rs, err := d.ServerGroupsAndResources() + return rs, err +} + +// ServerGroupsAndResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return ServerGroupsAndResources(d) + }) } // ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. @@ -220,23 +244,28 @@ func IsGroupDiscoveryFailedError(err error) bool { return err != nil && ok } -// serverPreferredResources returns the supported resources with the version preferred by the server. -func (d *DiscoveryClient) serverPreferredResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredResources(d) -} - // ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. 
func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err + _, rs, err := ServerGroupsAndResources(d) + return rs, err +} + +func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + sgs, err := d.ServerGroups() + if sgs == nil { + return nil, nil, err + } + resultGroups := []*metav1.APIGroup{} + for i := range sgs.Groups { + resultGroups = append(resultGroups, &sgs.Groups[i]) } - groupVersionResources, failedGroups := fetchGroupVersionResources(d, apiGroups) + groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) // order results by group/version discovery order result := []*metav1.APIResourceList{} - for _, apiGroup := range apiGroups.Groups { + for _, apiGroup := range sgs.Groups { for _, version := range apiGroup.Versions { gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} if resources, ok := groupVersionResources[gv]; ok { @@ -246,10 +275,10 @@ func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { } if len(failedGroups) == 0 { - return result, nil + return resultGroups, result, nil } - return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} + return resultGroups, result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } // ServerPreferredResources uses the provided discovery interface to look up preferred resources @@ -263,8 +292,8 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource - grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource - gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping + grAPIResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource + gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping for _, apiGroup := range serverGroupList.Groups { for _, version := range apiGroup.Versions { @@ -276,11 +305,11 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, } // create empty list which is filled later in another loop - emptyApiResourceList := metav1.APIResourceList{ + emptyAPIResourceList := metav1.APIResourceList{ GroupVersion: version.GroupVersion, } - gvApiResourceLists[groupVersion] = &emptyApiResourceList - result = append(result, &emptyApiResourceList) + gvAPIResourceLists[groupVersion] = &emptyAPIResourceList + result = append(result, &emptyAPIResourceList) for i := range apiResourceList.APIResources { apiResource := &apiResourceList.APIResources[i] @@ -288,21 +317,21 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, continue } gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} - if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { + if _, ok := grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { // only override with preferred version continue } grVersions[gv] = version.Version - grApiResources[gv] = apiResource + grAPIResources[gv] = apiResource } } } // group selected APIResources according to GroupVersion into APIResourceLists - for groupResource, apiResource := range grApiResources { + for 
groupResource, apiResource := range grAPIResources { version := grVersions[groupResource] groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} - apiResourceList := gvApiResourceLists[groupVersion] + apiResourceList := gvAPIResourceLists[groupVersion] apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) } @@ -313,7 +342,7 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } -// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel +// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel. func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) @@ -337,7 +366,9 @@ func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroup if err != nil { // TODO: maybe restrict this to NotFound errors failedGroups[groupVersion] = err - } else { + } + if apiResourceList != nil { + // even in case of error, some fallback might have been returned groupVersionResources[groupVersion] = apiResourceList } }() @@ -351,7 +382,11 @@ func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroup // ServerPreferredResources returns the supported resources with the version preferred by the // server. func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return withRetries(defaultRetries, d.serverPreferredResources) + _, rs, err := withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + rs, err := ServerPreferredResources(d) + return nil, rs, err + }) + return rs, err } // ServerPreferredNamespacedResources returns the supported namespaced resources with the @@ -377,7 +412,7 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { var info version.Info err = json.Unmarshal(body, &info) if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) + return nil, fmt.Errorf("unable to parse the server version: %v", err) } return &info, nil } @@ -388,7 +423,7 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { if err != nil { if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO(roycaihw): remove this in 1.11 + // TODO: remove this when kubectl/client-go don't work with 1.9 server data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() if err != nil { return nil, err @@ -406,19 +441,20 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { } // withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. 
-func withRetries(maxRetries int, f func() ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { +func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { var result []*metav1.APIResourceList + var resultGroups []*metav1.APIGroup var err error for i := 0; i < maxRetries; i++ { - result, err = f() + resultGroups, result, err = f() if err == nil { - return result, nil + return resultGroups, result, nil } if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { - return nil, err + return nil, nil, err } } - return result, err + return resultGroups, result, err } func setDiscoveryDefaults(config *restclient.Config) error { @@ -427,6 +463,13 @@ func setDiscoveryDefaults(config *restclient.Config) error { if config.Timeout == 0 { config.Timeout = defaultTimeout } + if config.Burst == 0 && config.QPS < 100 { + // discovery is expected to be bursty, increase the default burst + // to accommodate looking up resource info for many API groups. + // matches burst set by ConfigFlags#ToDiscoveryClient(). + // see https://issue.k8s.io/86149 + config.Burst = 100 + } codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()} config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) if len(config.UserAgent) == 0 { @@ -464,9 +507,9 @@ func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *DiscoveryClient) RESTClient() restclient.Interface { - if c == nil { +func (d *DiscoveryClient) RESTClient() restclient.Interface { + if d == nil { return nil } - return c.restClient + return d.restClient } diff --git a/vendor/k8s.io/client-go/discovery/doc.go b/vendor/k8s.io/client-go/discovery/doc.go index 76495588..6baa1ef2 100644 --- a/vendor/k8s.io/client-go/discovery/doc.go +++ b/vendor/k8s.io/client-go/discovery/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package discovery provides ways to discover server-supported // API groups, versions and resources. -package discovery +package discovery // import "k8s.io/client-go/discovery" diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go index 353d34b3..3bfe514e 100644 --- a/vendor/k8s.io/client-go/discovery/helper.go +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -31,11 +31,11 @@ import ( func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { sVer, err := client.ServerVersion() if err != nil { - return fmt.Errorf("couldn't read version from server: %v\n", err) + return fmt.Errorf("couldn't read version from server: %v", err) } // GitVersion includes GitCommit and GitTreeState, but best to be safe? if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion) + return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion) } return nil @@ -101,12 +101,15 @@ func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1 return result } +// ResourcePredicate has a method to check if a resource matches a given condition. 
type ResourcePredicate interface { Match(groupVersion string, r *metav1.APIResource) bool } +// ResourcePredicateFunc returns true if it matches a resource based on a custom condition. type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool +// Match is a wrapper around ResourcePredicateFunc. func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { return fn(groupVersion, r) } @@ -116,6 +119,7 @@ type SupportsAllVerbs struct { Verbs []string } +// Match checks if a resource contains all the given verbs. func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) } diff --git a/vendor/k8s.io/client-go/discovery/round_tripper.go b/vendor/k8s.io/client-go/discovery/round_tripper.go deleted file mode 100644 index 75b7f520..00000000 --- a/vendor/k8s.io/client-go/discovery/round_tripper.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "net/http" - "path/filepath" - - "github.com/golang/glog" - "github.com/gregjones/httpcache" - "github.com/gregjones/httpcache/diskcache" - "github.com/peterbourgon/diskv" -) - -type cacheRoundTripper struct { - rt *httpcache.Transport -} - -// newCacheRoundTripper creates a roundtripper that reads the ETag on -// response headers and send the If-None-Match header on subsequent -// corresponding requests. -func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { - d := diskv.New(diskv.Options{ - BasePath: cacheDir, - TempDir: filepath.Join(cacheDir, ".diskv-temp"), - }) - t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) - t.Transport = rt - - return &cacheRoundTripper{rt: t} -} - -func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return rt.rt.RoundTrip(req) -} - -func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := rt.rt.Transport.(canceler); ok { - cr.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) - } -} - -func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go index 7a0783cc..14a6db43 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go @@ -19,15 +19,15 @@ limitations under the License. 
package admissionregistration import ( - v1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1" + v1 "k8s.io/client-go/informers/admissionregistration/v1" v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -43,9 +43,9 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta1 returns a new v1beta1.Interface. diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go new file mode 100644 index 00000000..1ecae9ec --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. + MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. +func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer { + return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. 
+func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { + return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..4fadd9a2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/admissionregistration/v1" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for +// MutatingWebhookConfigurations. +type MutatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.MutatingWebhookConfigurationLister +} + +type mutatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistrationv1.MutatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *mutatingWebhookConfigurationInformer) Lister() v1.MutatingWebhookConfigurationLister { + return v1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..1c648e60 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/admissionregistration/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for +// ValidatingWebhookConfigurations. 
+type ValidatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ValidatingWebhookConfigurationLister +} + +type validatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistrationv1.ValidatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *validatingWebhookConfigurationInformer) Lister() v1.ValidatingWebhookConfigurationLister { + return v1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go deleted file mode 100644 index 4cfaae5b..00000000 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" - cache "k8s.io/client-go/tools/cache" -) - -// InitializerConfigurationInformer provides access to a shared informer and lister for -// InitializerConfigurations. -type InitializerConfigurationInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.InitializerConfigurationLister -} - -type initializerConfigurationInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.AdmissionregistrationV1alpha1().InitializerConfigurations().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.AdmissionregistrationV1alpha1().InitializerConfigurations().Watch(options) - }, - }, - &admissionregistrationv1alpha1.InitializerConfiguration{}, - resyncPeriod, - indexers, - ) -} - -func (f *initializerConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *initializerConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1alpha1.InitializerConfiguration{}, f.defaultInformer) -} - -func (f *initializerConfigurationInformer) Lister() v1alpha1.InitializerConfigurationLister { - return v1alpha1.NewInitializerConfigurationLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/auditregistration/interface.go b/vendor/k8s.io/client-go/informers/auditregistration/interface.go new file mode 100644 index 00000000..0f1682c4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/auditregistration/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package auditregistration + +import ( + v1alpha1 "k8s.io/client-go/informers/auditregistration/v1alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 00000000..69778ad2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/auditregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// AuditSinkInformer provides access to a shared informer and lister for +// AuditSinks. +type AuditSinkInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.AuditSinkLister +} + +type auditSinkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewAuditSinkInformer constructs a new informer for AuditSink type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAuditSinkInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredAuditSinkInformer constructs a new informer for AuditSink type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuditregistrationV1alpha1().AuditSinks().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuditregistrationV1alpha1().AuditSinks().Watch(options) + }, + }, + &auditregistrationv1alpha1.AuditSink{}, + resyncPeriod, + indexers, + ) +} + +func (f *auditSinkInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAuditSinkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *auditSinkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&auditregistrationv1alpha1.AuditSink{}, f.defaultInformer) +} + +func (f *auditSinkInformer) Lister() v1alpha1.AuditSinkLister { + return v1alpha1.NewAuditSinkLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go new file mode 100644 index 00000000..0a67ba82 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // AuditSinks returns a AuditSinkInformer. + AuditSinks() AuditSinkInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// AuditSinks returns a AuditSinkInformer. +func (v *version) AuditSinks() AuditSinkInformer { + return &auditSinkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/coordination/interface.go b/vendor/k8s.io/client-go/informers/coordination/interface.go index 8e541d80..54cfd7b9 100644 --- a/vendor/k8s.io/client-go/informers/coordination/interface.go +++ b/vendor/k8s.io/client-go/informers/coordination/interface.go @@ -19,12 +19,15 @@ limitations under the License. 
package coordination import ( + v1 "k8s.io/client-go/informers/coordination/v1" v1beta1 "k8s.io/client-go/informers/coordination/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -40,6 +43,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/coordination/v1/interface.go b/vendor/k8s.io/client-go/informers/coordination/v1/interface.go new file mode 100644 index 00000000..05c4acbe --- /dev/null +++ b/vendor/k8s.io/client-go/informers/coordination/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Leases returns a LeaseInformer. + Leases() LeaseInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Leases returns a LeaseInformer. +func (v *version) Leases() LeaseInformer { + return &leaseInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/coordination/v1/lease.go b/vendor/k8s.io/client-go/informers/coordination/v1/lease.go new file mode 100644 index 00000000..b8a3de47 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/coordination/v1/lease.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + coordinationv1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/coordination/v1" + cache "k8s.io/client-go/tools/cache" +) + +// LeaseInformer provides access to a shared informer and lister for +// Leases. +type LeaseInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.LeaseLister +} + +type leaseInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewLeaseInformer constructs a new informer for Lease type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLeaseInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLeaseInformer constructs a new informer for Lease type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoordinationV1().Leases(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoordinationV1().Leases(namespace).Watch(options) + }, + }, + &coordinationv1.Lease{}, + resyncPeriod, + indexers, + ) +} + +func (f *leaseInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLeaseInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *leaseInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&coordinationv1.Lease{}, f.defaultInformer) +} + +func (f *leaseInformer) Lister() v1.LeaseLister { + return v1.NewLeaseLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/interface.go b/vendor/k8s.io/client-go/informers/discovery/interface.go new file mode 100644 index 00000000..c0cae331 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/interface.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package discovery + +import ( + v1alpha1 "k8s.io/client-go/informers/discovery/v1alpha1" + v1beta1 "k8s.io/client-go/informers/discovery/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go new file mode 100644 index 00000000..a545ce15 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/discovery/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// EndpointSliceInformer provides access to a shared informer and lister for +// EndpointSlices. 
+type EndpointSliceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.EndpointSliceLister +} + +type endpointSliceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1alpha1().EndpointSlices(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1alpha1().EndpointSlices(namespace).Watch(options) + }, + }, + &discoveryv1alpha1.EndpointSlice{}, + resyncPeriod, + indexers, + ) +} + +func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&discoveryv1alpha1.EndpointSlice{}, f.defaultInformer) +} + +func (f *endpointSliceInformer) Lister() v1alpha1.EndpointSliceLister { + return v1alpha1.NewEndpointSliceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go new file mode 100644 index 00000000..711dcae5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // EndpointSlices returns a EndpointSliceInformer. + EndpointSlices() EndpointSliceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// EndpointSlices returns a EndpointSliceInformer. +func (v *version) EndpointSlices() EndpointSliceInformer { + return &endpointSliceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go new file mode 100644 index 00000000..f658866c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/discovery/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// EndpointSliceInformer provides access to a shared informer and lister for +// EndpointSlices. +type EndpointSliceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.EndpointSliceLister +} + +type endpointSliceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1beta1().EndpointSlices(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(options) + }, + }, + &discoveryv1beta1.EndpointSlice{}, + resyncPeriod, + indexers, + ) +} + +func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&discoveryv1beta1.EndpointSlice{}, f.defaultInformer) +} + +func (f *endpointSliceInformer) Lister() v1beta1.EndpointSliceLister { + return v1beta1.NewEndpointSliceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go new file mode 100644 index 00000000..4661646e --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // EndpointSlices returns a EndpointSliceInformer. + EndpointSlices() EndpointSliceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// EndpointSlices returns a EndpointSliceInformer. 
+func (v *version) EndpointSlices() EndpointSliceInformer { + return &endpointSliceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go index a259d27a..6f0bea7e 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -30,6 +30,8 @@ type Interface interface { Deployments() DeploymentInformer // Ingresses returns a IngressInformer. Ingresses() IngressInformer + // NetworkPolicies returns a NetworkPolicyInformer. + NetworkPolicies() NetworkPolicyInformer // PodSecurityPolicies returns a PodSecurityPolicyInformer. PodSecurityPolicies() PodSecurityPolicyInformer // ReplicaSets returns a ReplicaSetInformer. @@ -62,6 +64,11 @@ func (v *version) Ingresses() IngressInformer { return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// NetworkPolicies returns a NetworkPolicyInformer. +func (v *version) NetworkPolicies() NetworkPolicyInformer { + return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // PodSecurityPolicies returns a PodSecurityPolicyInformer. func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go new file mode 100644 index 00000000..92f4f040 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkPolicyInformer provides access to a shared informer and lister for +// NetworkPolicies. +type NetworkPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.NetworkPolicyLister +} + +type networkPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(options) + }, + }, + &extensionsv1beta1.NetworkPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensionsv1beta1.NetworkPolicy{}, f.defaultInformer) +} + +func (f *networkPolicyInformer) Lister() v1beta1.NetworkPolicyLister { + return v1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index 7ae22ee2..dbbc8f5e 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -28,15 +28,19 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" admissionregistration "k8s.io/client-go/informers/admissionregistration" apps "k8s.io/client-go/informers/apps" + auditregistration "k8s.io/client-go/informers/auditregistration" autoscaling "k8s.io/client-go/informers/autoscaling" batch "k8s.io/client-go/informers/batch" certificates "k8s.io/client-go/informers/certificates" coordination "k8s.io/client-go/informers/coordination" core "k8s.io/client-go/informers/core" + discovery "k8s.io/client-go/informers/discovery" events "k8s.io/client-go/informers/events" extensions "k8s.io/client-go/informers/extensions" + flowcontrol "k8s.io/client-go/informers/flowcontrol" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" networking "k8s.io/client-go/informers/networking" + node "k8s.io/client-go/informers/node" policy "k8s.io/client-go/informers/policy" rbac "k8s.io/client-go/informers/rbac" scheduling "k8s.io/client-go/informers/scheduling" @@ -188,14 +192,18 @@ type SharedInformerFactory interface { Admissionregistration() admissionregistration.Interface Apps() apps.Interface + Auditregistration() auditregistration.Interface Autoscaling() autoscaling.Interface Batch() batch.Interface Certificates() certificates.Interface Coordination() coordination.Interface Core() core.Interface + Discovery() discovery.Interface Events() 
events.Interface Extensions() extensions.Interface + Flowcontrol() flowcontrol.Interface Networking() networking.Interface + Node() node.Interface Policy() policy.Interface Rbac() rbac.Interface Scheduling() scheduling.Interface @@ -211,6 +219,10 @@ func (f *sharedInformerFactory) Apps() apps.Interface { return apps.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Auditregistration() auditregistration.Interface { + return auditregistration.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { return autoscaling.New(f, f.namespace, f.tweakListOptions) } @@ -231,6 +243,10 @@ func (f *sharedInformerFactory) Core() core.Interface { return core.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Discovery() discovery.Interface { + return discovery.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Events() events.Interface { return events.New(f, f.namespace, f.tweakListOptions) } @@ -239,10 +255,18 @@ func (f *sharedInformerFactory) Extensions() extensions.Interface { return extensions.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Flowcontrol() flowcontrol.Interface { + return flowcontrol.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Networking() networking.Interface { return networking.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Node() node.Interface { + return node.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Policy() policy.Interface { return policy.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go new file mode 100644 index 00000000..27e68efe --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package flowcontrol + +import ( + v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go new file mode 100644 index 00000000..af1d874c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/flowcontrol/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// FlowSchemaInformer provides access to a shared informer and lister for +// FlowSchemas. +type FlowSchemaInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.FlowSchemaLister +} + +type flowSchemaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewFlowSchemaInformer constructs a new informer for FlowSchema type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredFlowSchemaInformer constructs a new informer for FlowSchema type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1alpha1().FlowSchemas().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1alpha1().FlowSchemas().Watch(options) + }, + }, + &flowcontrolv1alpha1.FlowSchema{}, + resyncPeriod, + indexers, + ) +} + +func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFlowSchemaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1alpha1.FlowSchema{}, f.defaultInformer) +} + +func (f *flowSchemaInformer) Lister() v1alpha1.FlowSchemaLister { + return v1alpha1.NewFlowSchemaLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go new file mode 100644 index 00000000..7097c005 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // FlowSchemas returns a FlowSchemaInformer. + FlowSchemas() FlowSchemaInformer + // PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. + PriorityLevelConfigurations() PriorityLevelConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// FlowSchemas returns a FlowSchemaInformer. +func (v *version) FlowSchemas() FlowSchemaInformer { + return &flowSchemaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PriorityLevelConfigurations returns a PriorityLevelConfigurationInformer. 
+func (v *version) PriorityLevelConfigurations() PriorityLevelConfigurationInformer { + return &priorityLevelConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go new file mode 100644 index 00000000..c145b7d4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/flowcontrol/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationInformer provides access to a shared informer and lister for +// PriorityLevelConfigurations. +type PriorityLevelConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PriorityLevelConfigurationLister +} + +type priorityLevelConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityLevelConfigurationInformer constructs a new informer for PriorityLevelConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
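A minimal sketch of how a consumer would typically reach the new flowcontrol v1alpha1 informers through the shared factory rather than the standalone constructors in these files; the kubeconfig-based client construction and the 30-second resync period are assumptions made for illustration only.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed client construction from the default kubeconfig, for illustration.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// One shared factory backs every accessor below, so repeated calls reuse a
	// single watch instead of opening independent connections to the API server.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	fsInformer := factory.Flowcontrol().V1alpha1().FlowSchemas().Informer()
	plcLister := factory.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Lister()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, fsInformer.HasSynced)

	_ = plcLister // listers read from the informer's local cache once it has synced
}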
+func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().Watch(options) + }, + }, + &flowcontrolv1alpha1.PriorityLevelConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityLevelConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&flowcontrolv1alpha1.PriorityLevelConfiguration{}, f.defaultInformer) +} + +func (f *priorityLevelConfigurationInformer) Lister() v1alpha1.PriorityLevelConfigurationLister { + return v1alpha1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 3af96304..8e6df246 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -21,11 +21,12 @@ package informers import ( "fmt" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/api/admissionregistration/v1" v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" autoscalingv1 "k8s.io/api/autoscaling/v1" v2beta1 "k8s.io/api/autoscaling/v2beta1" v2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -33,15 +34,23 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" v2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" + schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" @@ -78,9 +87,11 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch 
resource { - // Group=admissionregistration.k8s.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("initializerconfigurations"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().InitializerConfigurations().Informer()}, nil + // Group=admissionregistration.k8s.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().MutatingWebhookConfigurations().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil // Group=admissionregistration.k8s.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): @@ -89,15 +100,15 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil // Group=apps, Version=v1 - case v1.SchemeGroupVersion.WithResource("controllerrevisions"): + case appsv1.SchemeGroupVersion.WithResource("controllerrevisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ControllerRevisions().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("daemonsets"): + case appsv1.SchemeGroupVersion.WithResource("daemonsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().DaemonSets().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("deployments"): + case appsv1.SchemeGroupVersion.WithResource("deployments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().Deployments().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("replicasets"): + case appsv1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ReplicaSets().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("statefulsets"): + case appsv1.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().StatefulSets().Informer()}, nil // Group=apps, Version=v1beta1 @@ -120,6 +131,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1beta2.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().StatefulSets().Informer()}, nil + // Group=auditregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("auditsinks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Auditregistration().V1alpha1().AuditSinks().Informer()}, nil + // Group=autoscaling, Version=v1 case autoscalingv1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil @@ -148,6 +163,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case certificatesv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil + // Group=coordination.k8s.io, Version=v1 + case coordinationv1.SchemeGroupVersion.WithResource("leases"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1().Leases().Informer()}, nil + // Group=coordination.k8s.io, Version=v1beta1 case coordinationv1beta1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1beta1().Leases().Informer()}, nil @@ -186,6 +205,14 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case corev1.SchemeGroupVersion.WithResource("serviceaccounts"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ServiceAccounts().Informer()}, nil + // Group=discovery.k8s.io, Version=v1alpha1 + case discoveryv1alpha1.SchemeGroupVersion.WithResource("endpointslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Discovery().V1alpha1().EndpointSlices().Informer()}, nil + + // Group=discovery.k8s.io, Version=v1beta1 + case discoveryv1beta1.SchemeGroupVersion.WithResource("endpointslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Discovery().V1beta1().EndpointSlices().Informer()}, nil + // Group=events.k8s.io, Version=v1beta1 case eventsv1beta1.SchemeGroupVersion.WithResource("events"): return &genericInformer{resource: resource.GroupResource(), informer: f.Events().V1beta1().Events().Informer()}, nil @@ -197,15 +224,35 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Deployments().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil + case extensionsv1beta1.SchemeGroupVersion.WithResource("networkpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().NetworkPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil + // Group=flowcontrol.apiserver.k8s.io, Version=v1alpha1 + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().FlowSchemas().Informer()}, nil + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Informer()}, nil + // Group=networking.k8s.io, Version=v1 case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil + // Group=networking.k8s.io, Version=v1beta1 + case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): + return &genericInformer{resource: resource.GroupResource(), informer: 
f.Networking().V1beta1().Ingresses().Informer()}, nil + + // Group=node.k8s.io, Version=v1alpha1 + case nodev1alpha1.SchemeGroupVersion.WithResource("runtimeclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1alpha1().RuntimeClasses().Informer()}, nil + + // Group=node.k8s.io, Version=v1beta1 + case nodev1beta1.SchemeGroupVersion.WithResource("runtimeclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1beta1().RuntimeClasses().Informer()}, nil + // Group=policy, Version=v1beta1 case policyv1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil @@ -242,6 +289,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil + // Group=scheduling.k8s.io, Version=v1 + case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1().PriorityClasses().Informer()}, nil + // Group=scheduling.k8s.io, Version=v1alpha1 case schedulingv1alpha1.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PriorityClasses().Informer()}, nil @@ -255,14 +306,22 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Settings().V1alpha1().PodPresets().Informer()}, nil // Group=storage.k8s.io, Version=v1 + case storagev1.SchemeGroupVersion.WithResource("csinodes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSINodes().Informer()}, nil case storagev1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil + case storagev1.SchemeGroupVersion.WithResource("volumeattachments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().VolumeAttachments().Informer()}, nil // Group=storage.k8s.io, Version=v1alpha1 case storagev1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttachments().Informer()}, nil // Group=storage.k8s.io, Version=v1beta1 + case storagev1beta1.SchemeGroupVersion.WithResource("csidrivers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().CSIDrivers().Informer()}, nil + case storagev1beta1.SchemeGroupVersion.WithResource("csinodes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().CSINodes().Informer()}, nil case storagev1beta1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil case storagev1beta1.SchemeGroupVersion.WithResource("volumeattachments"): diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go index 5e05516b..b00ed70c 100644 --- 
a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -27,6 +27,7 @@ import ( cache "k8s.io/client-go/tools/cache" ) +// NewInformerFunc takes kubernetes.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -35,4 +36,5 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/client-go/informers/networking/interface.go b/vendor/k8s.io/client-go/informers/networking/interface.go index 989e8fa0..4a028d5d 100644 --- a/vendor/k8s.io/client-go/informers/networking/interface.go +++ b/vendor/k8s.io/client-go/informers/networking/interface.go @@ -21,12 +21,15 @@ package networking import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" v1 "k8s.io/client-go/informers/networking/v1" + v1beta1 "k8s.io/client-go/informers/networking/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface } type group struct { @@ -44,3 +47,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go new file mode 100644 index 00000000..8abd00e1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// IngressInformer provides access to a shared informer and lister for +// Ingresses. 
+type IngressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.IngressLister +} + +type ingressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().Ingresses(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().Ingresses(namespace).Watch(options) + }, + }, + &networkingv1beta1.Ingress{}, + resyncPeriod, + indexers, + ) +} + +func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ingressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1beta1.Ingress{}, f.defaultInformer) +} + +func (f *ingressInformer) Lister() v1beta1.IngressLister { + return v1beta1.NewIngressLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go new file mode 100644 index 00000000..ab170dfc --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Ingresses returns a IngressInformer. 
+ Ingresses() IngressInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Ingresses returns a IngressInformer. +func (v *version) Ingresses() IngressInformer { + return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/node/interface.go b/vendor/k8s.io/client-go/informers/node/interface.go new file mode 100644 index 00000000..97736937 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/interface.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package node + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha1 "k8s.io/client-go/informers/node/v1alpha1" + v1beta1 "k8s.io/client-go/informers/node/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go similarity index 78% rename from vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go rename to vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go index 0f47d65d..c5644295 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go @@ -24,8 +24,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // InitializerConfigurations returns a InitializerConfigurationInformer. 
- InitializerConfigurations() InitializerConfigurationInformer + // RuntimeClasses returns a RuntimeClassInformer. + RuntimeClasses() RuntimeClassInformer } type version struct { @@ -39,7 +39,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// InitializerConfigurations returns a InitializerConfigurationInformer. -func (v *version) InitializerConfigurations() InitializerConfigurationInformer { - return &initializerConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +// RuntimeClasses returns a RuntimeClassInformer. +func (v *version) RuntimeClasses() RuntimeClassInformer { + return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go new file mode 100644 index 00000000..31edf930 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + nodev1alpha1 "k8s.io/api/node/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/node/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// RuntimeClassInformer provides access to a shared informer and lister for +// RuntimeClasses. +type RuntimeClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RuntimeClassLister +} + +type runtimeClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
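Where only a labelled subset matters, the filtered constructor that follows accepts a tweak function which adjusts the ListOptions before every list and watch call; a small sketch assuming a hypothetical example.com/managed=true label, with the helper name invented for the example.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	nodev1alpha1informers "k8s.io/client-go/informers/node/v1alpha1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newManagedRuntimeClassInformer is a hypothetical helper: it narrows the informer's
// list/watch requests server-side by injecting a label selector into the ListOptions.
func newManagedRuntimeClassInformer(client kubernetes.Interface) cache.SharedIndexInformer {
	tweak := func(options *metav1.ListOptions) {
		options.LabelSelector = "example.com/managed=true" // assumed label, illustration only
	}
	return nodev1alpha1informers.NewFilteredRuntimeClassInformer(client, 10*time.Minute, cache.Indexers{}, tweak)
}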
+func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1alpha1().RuntimeClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1alpha1().RuntimeClasses().Watch(options) + }, + }, + &nodev1alpha1.RuntimeClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&nodev1alpha1.RuntimeClass{}, f.defaultInformer) +} + +func (f *runtimeClassInformer) Lister() v1alpha1.RuntimeClassLister { + return v1alpha1.NewRuntimeClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go new file mode 100644 index 00000000..44a1defb --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // RuntimeClasses returns a RuntimeClassInformer. + RuntimeClasses() RuntimeClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// RuntimeClasses returns a RuntimeClassInformer. +func (v *version) RuntimeClasses() RuntimeClassInformer { + return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go new file mode 100644 index 00000000..6972993a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + nodev1beta1 "k8s.io/api/node/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/node/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// RuntimeClassInformer provides access to a shared informer and lister for +// RuntimeClasses. +type RuntimeClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.RuntimeClassLister +} + +type runtimeClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1beta1().RuntimeClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1beta1().RuntimeClasses().Watch(options) + }, + }, + &nodev1beta1.RuntimeClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&nodev1beta1.RuntimeClass{}, f.defaultInformer) +} + +func (f *runtimeClassInformer) Lister() v1beta1.RuntimeClassLister { + return v1beta1.NewRuntimeClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/interface.go b/vendor/k8s.io/client-go/informers/scheduling/interface.go index 16d030c3..659089b5 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/interface.go +++ b/vendor/k8s.io/client-go/informers/scheduling/interface.go @@ -20,12 +20,15 @@ package scheduling import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/scheduling/v1" v1alpha1 "k8s.io/client-go/informers/scheduling/v1alpha1" v1beta1 "k8s.io/client-go/informers/scheduling/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. @@ -43,6 +46,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1alpha1 returns a new v1alpha1.Interface. func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go b/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go new file mode 100644 index 00000000..fd7931f3 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // PriorityClasses returns a PriorityClassInformer. + PriorityClasses() PriorityClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// PriorityClasses returns a PriorityClassInformer. +func (v *version) PriorityClasses() PriorityClassInformer { + return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go new file mode 100644 index 00000000..a9ee6289 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + schedulingv1 "k8s.io/api/scheduling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/scheduling/v1" + cache "k8s.io/client-go/tools/cache" +) + +// PriorityClassInformer provides access to a shared informer and lister for +// PriorityClasses. +type PriorityClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PriorityClassLister +} + +type priorityClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityClassInformer constructs a new informer for PriorityClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1().PriorityClasses().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1().PriorityClasses().Watch(options) + }, + }, + &schedulingv1.PriorityClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&schedulingv1.PriorityClass{}, f.defaultInformer) +} + +func (f *priorityClassInformer) Lister() v1.PriorityClassLister { + return v1.NewPriorityClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/csinode.go b/vendor/k8s.io/client-go/informers/storage/v1/csinode.go new file mode 100644 index 00000000..eed947c4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1/csinode.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/storage/v1" + cache "k8s.io/client-go/tools/cache" +) + +// CSINodeInformer provides access to a shared informer and lister for +// CSINodes. +type CSINodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.CSINodeLister +} + +type cSINodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCSINodeInformer constructs a new informer for CSINode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
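The extended ForResource switch earlier in this patch also makes the additions reachable generically, without importing the typed accessors; a brief sketch assuming a caller that only knows the group, version and resource strings for the new storage.k8s.io/v1 csinodes entry (helper name invented for the example).

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// lookupCSINodeInformer is a hypothetical helper: it resolves a shared informer purely
// from a GroupVersionResource via the factory's generic ForResource switch.
func lookupCSINodeInformer(client kubernetes.Interface) error {
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	gvr := schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "csinodes"}
	generic, err := factory.ForResource(gvr)
	if err != nil {
		return fmt.Errorf("no informer registered for %v: %v", gvr, err)
	}
	_ = generic.Lister() // a cache.GenericLister backed by the shared CSINode informer
	return nil
}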
+func NewCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCSINodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCSINodeInformer constructs a new informer for CSINode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().CSINodes().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().CSINodes().Watch(options) + }, + }, + &storagev1.CSINode{}, + resyncPeriod, + indexers, + ) +} + +func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCSINodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cSINodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1.CSINode{}, f.defaultInformer) +} + +func (f *cSINodeInformer) Lister() v1.CSINodeLister { + return v1.NewCSINodeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1/interface.go index d7e4b5c4..59f367d3 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/interface.go @@ -24,8 +24,12 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // CSINodes returns a CSINodeInformer. + CSINodes() CSINodeInformer // StorageClasses returns a StorageClassInformer. StorageClasses() StorageClassInformer + // VolumeAttachments returns a VolumeAttachmentInformer. + VolumeAttachments() VolumeAttachmentInformer } type version struct { @@ -39,7 +43,17 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// CSINodes returns a CSINodeInformer. +func (v *version) CSINodes() CSINodeInformer { + return &cSINodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // StorageClasses returns a StorageClassInformer. func (v *version) StorageClasses() StorageClassInformer { return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// VolumeAttachments returns a VolumeAttachmentInformer. +func (v *version) VolumeAttachments() VolumeAttachmentInformer { + return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go new file mode 100644 index 00000000..7ca3b86f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/storage/v1" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentInformer provides access to a shared informer and lister for +// VolumeAttachments. +type VolumeAttachmentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.VolumeAttachmentLister +} + +type volumeAttachmentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().VolumeAttachments().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().VolumeAttachments().Watch(options) + }, + }, + &storagev1.VolumeAttachment{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1.VolumeAttachment{}, f.defaultInformer) +} + +func (f *volumeAttachmentInformer) Lister() v1.VolumeAttachmentLister { + return v1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go new file mode 100644 index 00000000..7f7cb216 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + storagev1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// CSIDriverInformer provides access to a shared informer and lister for +// CSIDrivers. +type CSIDriverInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CSIDriverLister +} + +type cSIDriverInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCSIDriverInformer constructs a new informer for CSIDriver type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCSIDriverInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCSIDriverInformer constructs a new informer for CSIDriver type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().CSIDrivers().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().CSIDrivers().Watch(options) + }, + }, + &storagev1beta1.CSIDriver{}, + resyncPeriod, + indexers, + ) +} + +func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCSIDriverInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1beta1.CSIDriver{}, f.defaultInformer) +} + +func (f *cSIDriverInformer) Lister() v1beta1.CSIDriverLister { + return v1beta1.NewCSIDriverLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go new file mode 100644 index 00000000..218bb118 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + storagev1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// CSINodeInformer provides access to a shared informer and lister for +// CSINodes. +type CSINodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CSINodeLister +} + +type cSINodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCSINodeInformer constructs a new informer for CSINode type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCSINodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCSINodeInformer constructs a new informer for CSINode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().CSINodes().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().CSINodes().Watch(options) + }, + }, + &storagev1beta1.CSINode{}, + resyncPeriod, + indexers, + ) +} + +func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCSINodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cSINodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1beta1.CSINode{}, f.defaultInformer) +} + +func (f *cSINodeInformer) Lister() v1beta1.CSINodeLister { + return v1beta1.NewCSINodeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go index aa11c2bb..af4ee2f7 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -24,6 +24,10 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // CSIDrivers returns a CSIDriverInformer. + CSIDrivers() CSIDriverInformer + // CSINodes returns a CSINodeInformer. + CSINodes() CSINodeInformer // StorageClasses returns a StorageClassInformer. StorageClasses() StorageClassInformer // VolumeAttachments returns a VolumeAttachmentInformer. @@ -41,6 +45,16 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// CSIDrivers returns a CSIDriverInformer. +func (v *version) CSIDrivers() CSIDriverInformer { + return &cSIDriverInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CSINodes returns a CSINodeInformer. +func (v *version) CSINodes() CSINodeInformer { + return &cSINodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // StorageClasses returns a StorageClassInformer. 
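The hunks above add v1 VolumeAttachment and v1beta1 CSIDriver/CSINode informers and wire them into the group interface. A minimal usage sketch (not part of this vendored patch) of the consumption pattern the generated doc comments recommend: obtain these informers through a shared factory rather than the NewFiltered*Informer constructors, so watches and caches are shared. It assumes a kubeconfig at the default location and elides most error handling.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes ~/.kube/config; any rest.Config source works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One factory, many informers: they share watch connections and caches.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	csiDrivers := factory.Storage().V1beta1().CSIDrivers()
	volumeAttachments := factory.Storage().V1().VolumeAttachments()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Listers read from the shared in-memory cache, not the API server.
	drivers, _ := csiDrivers.Lister().List(labels.Everything())
	attachments, _ := volumeAttachments.Lister().List(labels.Everything())
	fmt.Println(len(drivers), len(attachments))
}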
func (v *version) StorageClasses() StorageClassInformer { return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 122e4bb7..cf98b050 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -19,12 +19,15 @@ limitations under the License. package kubernetes import ( + "fmt" + discovery "k8s.io/client-go/discovery" - admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" + admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -36,15 +39,23 @@ import ( batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1" + discoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" + nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" @@ -57,70 +68,45 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface - AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface + AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface - // Deprecated: please explicitly pick a version if possible. 
- Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface + AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface - AppsV1() appsv1.AppsV1Interface - // Deprecated: please explicitly pick a version if possible. - Apps() appsv1.AppsV1Interface + AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface - // Deprecated: please explicitly pick a version if possible. - Authentication() authenticationv1.AuthenticationV1Interface AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface AuthorizationV1() authorizationv1.AuthorizationV1Interface - // Deprecated: please explicitly pick a version if possible. - Authorization() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface AutoscalingV1() autoscalingv1.AutoscalingV1Interface - // Deprecated: please explicitly pick a version if possible. - Autoscaling() autoscalingv1.AutoscalingV1Interface AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface BatchV1() batchv1.BatchV1Interface - // Deprecated: please explicitly pick a version if possible. - Batch() batchv1.BatchV1Interface BatchV1beta1() batchv1beta1.BatchV1beta1Interface BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Certificates() certificatesv1beta1.CertificatesV1beta1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Coordination() coordinationv1beta1.CoordinationV1beta1Interface + CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface - // Deprecated: please explicitly pick a version if possible. - Core() corev1.CoreV1Interface + DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface + DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Extensions() extensionsv1beta1.ExtensionsV1beta1Interface + FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface NetworkingV1() networkingv1.NetworkingV1Interface - // Deprecated: please explicitly pick a version if possible. - Networking() networkingv1.NetworkingV1Interface + NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface + NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface + NodeV1beta1() nodev1beta1.NodeV1beta1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Policy() policyv1beta1.PolicyV1beta1Interface RbacV1() rbacv1.RbacV1Interface - // Deprecated: please explicitly pick a version if possible. 
- Rbac() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Scheduling() schedulingv1beta1.SchedulingV1beta1Interface + SchedulingV1() schedulingv1.SchedulingV1Interface SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Settings() settingsv1alpha1.SettingsV1alpha1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface - // Deprecated: please explicitly pick a version if possible. - Storage() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } @@ -128,42 +114,51 @@ type Interface interface { // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient - admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client - admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client - appsV1beta1 *appsv1beta1.AppsV1beta1Client - appsV1beta2 *appsv1beta2.AppsV1beta2Client - appsV1 *appsv1.AppsV1Client - authenticationV1 *authenticationv1.AuthenticationV1Client - authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client - authorizationV1 *authorizationv1.AuthorizationV1Client - authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client - autoscalingV1 *autoscalingv1.AutoscalingV1Client - autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client - autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client - batchV1 *batchv1.BatchV1Client - batchV1beta1 *batchv1beta1.BatchV1beta1Client - batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client - certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client - coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client - coreV1 *corev1.CoreV1Client - eventsV1beta1 *eventsv1beta1.EventsV1beta1Client - extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - networkingV1 *networkingv1.NetworkingV1Client - policyV1beta1 *policyv1beta1.PolicyV1beta1Client - rbacV1 *rbacv1.RbacV1Client - rbacV1beta1 *rbacv1beta1.RbacV1beta1Client - rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client - schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client - settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client - storageV1beta1 *storagev1beta1.StorageV1beta1Client - storageV1 *storagev1.StorageV1Client - storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client -} - -// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client -func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { - return c.admissionregistrationV1alpha1 + admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + appsV1 *appsv1.AppsV1Client + appsV1beta1 *appsv1beta1.AppsV1beta1Client + appsV1beta2 *appsv1beta2.AppsV1beta2Client + auditregistrationV1alpha1 *auditregistrationv1alpha1.AuditregistrationV1alpha1Client + authenticationV1 *authenticationv1.AuthenticationV1Client + authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client + authorizationV1 
*authorizationv1.AuthorizationV1Client + authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client + autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client + autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client + batchV1 *batchv1.BatchV1Client + batchV1beta1 *batchv1beta1.BatchV1beta1Client + batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client + certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client + coordinationV1 *coordinationv1.CoordinationV1Client + coreV1 *corev1.CoreV1Client + discoveryV1alpha1 *discoveryv1alpha1.DiscoveryV1alpha1Client + discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client + extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client + flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client + networkingV1 *networkingv1.NetworkingV1Client + networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client + nodeV1beta1 *nodev1beta1.NodeV1beta1Client + policyV1beta1 *policyv1beta1.PolicyV1beta1Client + rbacV1 *rbacv1.RbacV1Client + rbacV1beta1 *rbacv1beta1.RbacV1beta1Client + rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client + schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client + schedulingV1 *schedulingv1.SchedulingV1Client + settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client + storageV1beta1 *storagev1beta1.StorageV1beta1Client + storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client +} + +// AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client +func (c *Clientset) AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface { + return c.admissionregistrationV1 } // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client @@ -171,10 +166,9 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. return c.admissionregistrationV1beta1 } -// Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. -// Please explicitly pick a version. -func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { - return c.admissionregistrationV1beta1 +// AppsV1 retrieves the AppsV1Client +func (c *Clientset) AppsV1() appsv1.AppsV1Interface { + return c.appsV1 } // AppsV1beta1 retrieves the AppsV1beta1Client @@ -187,15 +181,9 @@ func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { return c.appsV1beta2 } -// AppsV1 retrieves the AppsV1Client -func (c *Clientset) AppsV1() appsv1.AppsV1Interface { - return c.appsV1 -} - -// Deprecated: Apps retrieves the default version of AppsClient. -// Please explicitly pick a version. -func (c *Clientset) Apps() appsv1.AppsV1Interface { - return c.appsV1 +// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client +func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return c.auditregistrationV1alpha1 } // AuthenticationV1 retrieves the AuthenticationV1Client @@ -203,12 +191,6 @@ func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interfac return c.authenticationV1 } -// Deprecated: Authentication retrieves the default version of AuthenticationClient. 
-// Please explicitly pick a version. -func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { - return c.authenticationV1 -} - // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { return c.authenticationV1beta1 @@ -219,12 +201,6 @@ func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { return c.authorizationV1 } -// Deprecated: Authorization retrieves the default version of AuthorizationClient. -// Please explicitly pick a version. -func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { - return c.authorizationV1 -} - // AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { return c.authorizationV1beta1 @@ -235,12 +211,6 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return c.autoscalingV1 } -// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. -// Please explicitly pick a version. -func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { - return c.autoscalingV1 -} - // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return c.autoscalingV2beta1 @@ -256,12 +226,6 @@ func (c *Clientset) BatchV1() batchv1.BatchV1Interface { return c.batchV1 } -// Deprecated: Batch retrieves the default version of BatchClient. -// Please explicitly pick a version. -func (c *Clientset) Batch() batchv1.BatchV1Interface { - return c.batchV1 -} - // BatchV1beta1 retrieves the BatchV1beta1Client func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { return c.batchV1beta1 @@ -277,21 +241,14 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } -// Deprecated: Certificates retrieves the default version of CertificatesClient. -// Please explicitly pick a version. -func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { - return c.certificatesV1beta1 -} - // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 } -// Deprecated: Coordination retrieves the default version of CoordinationClient. -// Please explicitly pick a version. -func (c *Clientset) Coordination() coordinationv1beta1.CoordinationV1beta1Interface { - return c.coordinationV1beta1 +// CoordinationV1 retrieves the CoordinationV1Client +func (c *Clientset) CoordinationV1() coordinationv1.CoordinationV1Interface { + return c.coordinationV1 } // CoreV1 retrieves the CoreV1Client @@ -299,20 +256,18 @@ func (c *Clientset) CoreV1() corev1.CoreV1Interface { return c.coreV1 } -// Deprecated: Core retrieves the default version of CoreClient. -// Please explicitly pick a version. 
-func (c *Clientset) Core() corev1.CoreV1Interface { - return c.coreV1 +// DiscoveryV1alpha1 retrieves the DiscoveryV1alpha1Client +func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface { + return c.discoveryV1alpha1 } -// EventsV1beta1 retrieves the EventsV1beta1Client -func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { - return c.eventsV1beta1 +// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client +func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface { + return c.discoveryV1beta1 } -// Deprecated: Events retrieves the default version of EventsClient. -// Please explicitly pick a version. -func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { +// EventsV1beta1 retrieves the EventsV1beta1Client +func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 } @@ -321,10 +276,9 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return c.extensionsV1beta1 } -// Deprecated: Extensions retrieves the default version of ExtensionsClient. -// Please explicitly pick a version. -func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { - return c.extensionsV1beta1 +// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client +func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { + return c.flowcontrolV1alpha1 } // NetworkingV1 retrieves the NetworkingV1Client @@ -332,20 +286,23 @@ func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 } -// Deprecated: Networking retrieves the default version of NetworkingClient. -// Please explicitly pick a version. -func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { - return c.networkingV1 +// NetworkingV1beta1 retrieves the NetworkingV1beta1Client +func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { + return c.networkingV1beta1 } -// PolicyV1beta1 retrieves the PolicyV1beta1Client -func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { - return c.policyV1beta1 +// NodeV1alpha1 retrieves the NodeV1alpha1Client +func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { + return c.nodeV1alpha1 +} + +// NodeV1beta1 retrieves the NodeV1beta1Client +func (c *Clientset) NodeV1beta1() nodev1beta1.NodeV1beta1Interface { + return c.nodeV1beta1 } -// Deprecated: Policy retrieves the default version of PolicyClient. -// Please explicitly pick a version. -func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { +// PolicyV1beta1 retrieves the PolicyV1beta1Client +func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return c.policyV1beta1 } @@ -354,12 +311,6 @@ func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { return c.rbacV1 } -// Deprecated: Rbac retrieves the default version of RbacClient. -// Please explicitly pick a version. -func (c *Clientset) Rbac() rbacv1.RbacV1Interface { - return c.rbacV1 -} - // RbacV1beta1 retrieves the RbacV1beta1Client func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { return c.rbacV1beta1 @@ -380,10 +331,9 @@ func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Inter return c.schedulingV1beta1 } -// Deprecated: Scheduling retrieves the default version of SchedulingClient. -// Please explicitly pick a version. 
-func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface { - return c.schedulingV1beta1 +// SchedulingV1 retrieves the SchedulingV1Client +func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { + return c.schedulingV1 } // SettingsV1alpha1 retrieves the SettingsV1alpha1Client @@ -391,12 +341,6 @@ func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interfac return c.settingsV1alpha1 } -// Deprecated: Settings retrieves the default version of SettingsClient. -// Please explicitly pick a version. -func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { - return c.settingsV1alpha1 -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return c.storageV1beta1 @@ -407,12 +351,6 @@ func (c *Clientset) StorageV1() storagev1.StorageV1Interface { return c.storageV1 } -// Deprecated: Storage retrieves the default version of StorageClient. -// Please explicitly pick a version. -func (c *Clientset) Storage() storagev1.StorageV1Interface { - return c.storageV1 -} - // StorageV1alpha1 retrieves the StorageV1alpha1Client func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return c.storageV1alpha1 @@ -427,14 +365,19 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { } // NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } var cs Clientset var err error - cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -442,6 +385,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -450,7 +397,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) + cs.auditregistrationV1alpha1, err = auditregistrationv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -502,10 +449,22 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.coordinationV1, err = coordinationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } + cs.discoveryV1alpha1, err = discoveryv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ 
-514,10 +473,26 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } + cs.networkingV1beta1, err = networkingv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.nodeV1beta1, err = nodev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -542,6 +517,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.schedulingV1, err = schedulingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -570,11 +549,12 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset - cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) + cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) + cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) - cs.appsV1 = appsv1.NewForConfigOrDie(c) + cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.NewForConfigOrDie(c) cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) @@ -587,16 +567,24 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) + cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.discoveryV1alpha1 = discoveryv1alpha1.NewForConfigOrDie(c) + cs.discoveryV1beta1 = discoveryv1beta1.NewForConfigOrDie(c) cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) + cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) + cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) + cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) + cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) cs.rbacV1 = rbacv1.NewForConfigOrDie(c) cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) + cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) @@ -609,11 +597,12 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given 
RESTClient. func New(c rest.Interface) *Clientset { var cs Clientset - cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) + cs.admissionregistrationV1 = admissionregistrationv1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) + cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) - cs.appsV1 = appsv1.New(c) + cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.New(c) cs.authenticationV1 = authenticationv1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) cs.authorizationV1 = authorizationv1.New(c) @@ -626,16 +615,24 @@ func New(c rest.Interface) *Clientset { cs.batchV2alpha1 = batchv2alpha1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) + cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) + cs.discoveryV1alpha1 = discoveryv1alpha1.New(c) + cs.discoveryV1beta1 = discoveryv1beta1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) + cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) cs.networkingV1 = networkingv1.New(c) + cs.networkingV1beta1 = networkingv1beta1.New(c) + cs.nodeV1alpha1 = nodev1alpha1.New(c) + cs.nodeV1beta1 = nodev1beta1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) + cs.schedulingV1 = schedulingv1.New(c) cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 9ca89b76..4d8e8b7f 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -19,11 +19,12 @@ limitations under the License. 
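The clientset changes above drop the deprecated unversioned accessors (Core(), Apps(), ...) in favour of explicit versions, register the new API groups, and make NewForConfig reject a config that sets QPS without a positive Burst. A hedged sketch, again assuming a default kubeconfig and the kube-system namespace purely for illustration:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	// With this patch, QPS > 0 and Burst <= 0 is an error from NewForConfig
	// instead of silently producing a broken token-bucket rate limiter.
	cfg.QPS = 50
	cfg.Burst = 100

	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Callers now pick an explicit version, including newly added groups
	// such as coordination/v1. Note the 1.17-era signature without a context.
	leases, err := clientset.CoordinationV1().Leases("kube-system").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d leases\n", len(leases.Items))
}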
package scheme import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" @@ -35,15 +36,23 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" batchv2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" + schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" @@ -61,11 +70,12 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - admissionregistrationv1alpha1.AddToScheme, + admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, + appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, - appsv1.AddToScheme, + auditregistrationv1alpha1.AddToScheme, authenticationv1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, @@ -78,16 +88,24 @@ var localSchemeBuilder = runtime.SchemeBuilder{ batchv2alpha1.AddToScheme, certificatesv1beta1.AddToScheme, coordinationv1beta1.AddToScheme, + coordinationv1.AddToScheme, corev1.AddToScheme, + discoveryv1alpha1.AddToScheme, + discoveryv1beta1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, + flowcontrolv1alpha1.AddToScheme, networkingv1.AddToScheme, + networkingv1beta1.AddToScheme, + nodev1alpha1.AddToScheme, + nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, + schedulingv1.AddToScheme, settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go new file mode 100644 index 00000000..75127315 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1Client { + return &AdmissionregistrationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go similarity index 52% rename from vendor/k8s.io/kubernetes/pkg/apis/core/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go index 6017bfda..3af5d054 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,11 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package +// Code generated by client-gen. DO NOT EDIT. -// Package api contains the latest (or "internal") version of the -// Kubernetes API objects. This is the API objects as represented in memory. -// The contract presented to clients is located in the versioned packages, -// which are sub-directories. The first one is "v1". Those packages -// describe how a particular version is serialized to storage/network. -package core // import "k8s.io/kubernetes/pkg/apis/core" +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go new file mode 100644 index 00000000..a5b062e3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type MutatingWebhookConfigurationExpansion interface{} + +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..1f5e5e38 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
+type MutatingWebhookConfigurationInterface interface { + Create(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) + Update(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) + List(opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
+func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..7987b6e3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
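For the new AdmissionregistrationV1 typed client added above, an illustrative sketch (not part of the patch) of listing cluster-scoped webhook configurations; the 30-second timeout is an arbitrary example value that the generated List code propagates into the request timeout via TimeoutSeconds.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	timeout := int64(30) // seconds; honoured by the generated List/Watch code
	webhooks, err := clientset.AdmissionregistrationV1().
		ValidatingWebhookConfigurations().
		List(metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		panic(err)
	}
	for _, wh := range webhooks.Items {
		fmt.Println(wh.Name)
	}
}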
+type ValidatingWebhookConfigurationInterface interface { + Create(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) + Update(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) + List(opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. 
Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go deleted file mode 100644 index e014ea72..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// InitializerConfigurationsGetter has a method to return a InitializerConfigurationInterface. -// A group's client should implement this interface. -type InitializerConfigurationsGetter interface { - InitializerConfigurations() InitializerConfigurationInterface -} - -// InitializerConfigurationInterface has methods to work with InitializerConfiguration resources. 
-type InitializerConfigurationInterface interface { - Create(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) - Update(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.InitializerConfiguration, error) - List(opts v1.ListOptions) (*v1alpha1.InitializerConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) - InitializerConfigurationExpansion -} - -// initializerConfigurations implements InitializerConfigurationInterface -type initializerConfigurations struct { - client rest.Interface -} - -// newInitializerConfigurations returns a InitializerConfigurations -func newInitializerConfigurations(c *AdmissionregistrationV1alpha1Client) *initializerConfigurations { - return &initializerConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any. -func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Get(). - Resource("initializerconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. -func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { - result = &v1alpha1.InitializerConfigurationList{} - err = c.client.Get(). - Resource("initializerconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested initializerConfigurations. -func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("initializerconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. -func (c *initializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Post(). - Resource("initializerconfigurations"). - Body(initializerConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. -func (c *initializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Put(). - Resource("initializerconfigurations"). - Name(initializerConfiguration.Name). - Body(initializerConfiguration). - Do(). 
- Into(result) - return -} - -// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs. -func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("initializerconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("initializerconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched initializerConfiguration. -func (c *initializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Patch(pt). - Resource("initializerconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index b13ea795..2d93ff02 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -76,7 +75,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index cb015710..4524896c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.MutatingWebhookConfigurationList{} err = c.client.Get(). Resource("mutatingwebhookconfigurations"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1bet // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("mutatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 3a9339f6..7e711b30 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ValidatingWebhookConfigurationList{} err = c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *validatingWebhookConfigurations) Delete(name string, options *v1.Delete // DeleteCollection deletes a collection of objects. 
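// A minimal usage sketch of the timeout handling added in the hunks above,
// assuming a configured *kubernetes.Clientset named clientset (the 30-second
// value is only an example). With this change, ListOptions.TimeoutSeconds is
// passed through Timeout(...), so one field bounds both the server-side
// list/watch and the client-side HTTP request.
package examples

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func listWebhookConfigsWithTimeout(clientset *kubernetes.Clientset) error {
	timeout := int64(30) // seconds
	// The same TimeoutSeconds value is honored by the API server and now also
	// applied as the request timeout on the underlying REST call.
	_, err := clientset.AdmissionregistrationV1beta1().
		MutatingWebhookConfigurations().
		List(metav1.ListOptions{TimeoutSeconds: &timeout})
	return err
}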
func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("validatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index da19c759..621c734a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -91,7 +90,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index 1ddaa1a7..e28e4d2a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.Controll // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). 
Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index 03a87069..a535cdab 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.Dae // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, er // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index 73d46f8b..f9799a45 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -19,7 +19,10 @@ limitations under the License. 
package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type DeploymentInterface interface { List(opts metav1.ListOptions) (*v1.DeploymentList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + DeploymentExpansion } @@ -76,11 +82,16 @@ func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.De // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } + +// GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + Body(scale). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 07794116..ff3504e7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -19,7 +19,10 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type ReplicaSetInterface interface { List(opts metav1.ListOptions) (*v1.ReplicaSetList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + ReplicaSetExpansion } @@ -76,11 +82,16 @@ func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.Re // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } + +// GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. 
Returns the server's representation of the scale, and an error, if there is any. +func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index 54322d97..c12c470b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -19,7 +19,10 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type StatefulSetInterface interface { List(opts metav1.ListOptions) (*v1.StatefulSetList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + StatefulSetExpansion } @@ -76,11 +82,16 @@ func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.S // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subre Into(result) return } + +// GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. 
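// A minimal sketch of the GetScale/UpdateScale methods added to the apps/v1
// typed clients, assuming a configured *kubernetes.Clientset named clientset
// and a Deployment "web" in the "default" namespace (both names are
// placeholders).
package examples

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func bumpDeploymentReplicas(clientset *kubernetes.Clientset) error {
	deployments := clientset.AppsV1().Deployments("default")

	// Read the current desired replica count via the scale subresource.
	scale, err := deployments.GetScale("web", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Write the updated count back through the same subresource; the server
	// returns its representation of the Scale object.
	scale.Spec.Replicas++
	_, err = deployments.UpdateScale("web", scale)
	return err
}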
+func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 4d882e26..e5dd64d9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -29,7 +28,6 @@ type AppsV1beta1Interface interface { RESTClient() rest.Interface ControllerRevisionsGetter DeploymentsGetter - ScalesGetter StatefulSetsGetter } @@ -46,10 +44,6 @@ func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { return newDeployments(c, namespace) } -func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } @@ -86,7 +80,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index ec8fa924..45ddb915 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 365e06f3..05fdcb7a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go index b2bfd73a..113455df 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -22,6 +22,4 @@ type ControllerRevisionExpansion interface{} type DeploymentExpansion interface{} -type ScaleExpansion interface{} - type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go deleted file mode 100644 index cef27bd1..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *AppsV1beta1Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 65174545..c4b35b42 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. 
func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 27549499..7ca4e0b2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -20,7 +20,6 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -31,7 +30,6 @@ type AppsV1beta2Interface interface { DaemonSetsGetter DeploymentsGetter ReplicaSetsGetter - ScalesGetter StatefulSetsGetter } @@ -56,10 +54,6 @@ func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } -func (c *AppsV1beta2Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } @@ -96,7 +90,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index 1271cc62..e1d60251 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 683c0681..f8b7ac25 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 9a04513f..510250b0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go index bceae598..6a217496 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go @@ -26,6 +26,4 @@ type DeploymentExpansion interface{} type ReplicaSetExpansion interface{} -type ScaleExpansion interface{} - type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 9fd9de93..7b738774 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. 
func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go deleted file mode 100644 index f8d6a7fb..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *AppsV1beta2Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 095601e1..de7c3db8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -91,11 +98,16 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go similarity index 54% rename from vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go index 5e02f722..ec63179e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go @@ -19,28 +19,27 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type AdmissionregistrationV1alpha1Interface interface { +type AuditregistrationV1alpha1Interface interface { RESTClient() rest.Interface - InitializerConfigurationsGetter + AuditSinksGetter } -// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group. -type AdmissionregistrationV1alpha1Client struct { +// AuditregistrationV1alpha1Client is used to interact with features provided by the auditregistration.k8s.io group. 
+type AuditregistrationV1alpha1Client struct { restClient rest.Interface } -func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { - return newInitializerConfigurations(c) +func (c *AuditregistrationV1alpha1Client) AuditSinks() AuditSinkInterface { + return newAuditSinks(c) } -// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) { +// NewForConfig creates a new AuditregistrationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -49,12 +48,12 @@ func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) if err != nil { return nil, err } - return &AdmissionregistrationV1alpha1Client{client}, nil + return &AuditregistrationV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and +// NewForConfigOrDie creates a new AuditregistrationV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -62,16 +61,16 @@ func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { return client } -// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { - return &AdmissionregistrationV1alpha1Client{c} +// New creates a new AuditregistrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AuditregistrationV1alpha1Client { + return &AuditregistrationV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() @@ -82,7 +81,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface { +func (c *AuditregistrationV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 00000000..414d4800 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// AuditSinksGetter has a method to return a AuditSinkInterface. +// A group's client should implement this interface. +type AuditSinksGetter interface { + AuditSinks() AuditSinkInterface +} + +// AuditSinkInterface has methods to work with AuditSink resources. +type AuditSinkInterface interface { + Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) + Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error) + List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) + AuditSinkExpansion +} + +// auditSinks implements AuditSinkInterface +type auditSinks struct { + client rest.Interface +} + +// newAuditSinks returns a AuditSinks +func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks { + return &auditSinks{ + client: c.RESTClient(), + } +} + +// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. +func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Get(). + Resource("auditsinks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. +func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.AuditSinkList{} + err = c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested auditSinks. +func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Post(). + Resource("auditsinks"). + Body(auditSink). + Do(). + Into(result) + return +} + +// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. 
+func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Put(). + Resource("auditsinks"). + Name(auditSink.Name). + Body(auditSink). + Do(). + Into(result) + return +} + +// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. +func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("auditsinks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("auditsinks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched auditSink. +func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Patch(pt). + Resource("auditsinks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go index 1e29b96f..f0f51172 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go @@ -18,4 +18,4 @@ limitations under the License. 
package v1alpha1 -type InitializerConfigurationExpansion interface{} +type AuditSinkExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index 3bdcee59..de8864e2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/authentication/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go index ea21f1b4..8a21b7c7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1 import ( + "context" + authenticationapi "k8s.io/api/authentication/v1" ) type TokenReviewExpansion interface { Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) + CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) } func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + return c.CreateContext(context.Background(), tokenReview) +} + +func (c *tokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { result = &authenticationapi.TokenReview{} err = c.client.Post(). + Context(ctx). Resource("tokenreviews"). Body(tokenReview). Do(). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 7f3334a0..816bd0a2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/authentication/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go index 8f186fa7..0476b173 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1beta1 import ( + "context" + authenticationapi "k8s.io/api/authentication/v1beta1" ) type TokenReviewExpansion interface { Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) + CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) } func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + return c.CreateContext(context.Background(), tokenReview) +} + +func (c *tokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { result = &authenticationapi.TokenReview{} err = c.client.Post(). + Context(ctx). Resource("tokenreviews"). Body(tokenReview). Do(). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go index e84b9008..2cc22632 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/authorization/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go index 0c123b07..9836308b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" ) type LocalSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) } func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *localSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { result = &authorizationapi.LocalSubjectAccessReview{} err = c.client.Post(). + Context(ctx). Namespace(c.ns). Resource("localsubjectaccessreviews"). Body(sar). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go index 5b70a27d..916e5b43 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. 
package v1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" ) type SelfSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) } func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *selfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { result = &authorizationapi.SelfSubjectAccessReview{} err = c.client.Post(). + Context(ctx). Resource("selfsubjectaccessreviews"). Body(sar). Do(). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go index e2cad880..365282ed 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" ) type SelfSubjectRulesReviewExpansion interface { Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) + CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) } func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + return c.CreateContext(context.Background(), srr) +} + +func (c *selfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { result = &authorizationapi.SelfSubjectRulesReview{} err = c.client.Post(). + Context(ctx). Resource("selfsubjectrulesreviews"). Body(srr). Do(). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go index b5ed87d3..927544f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go @@ -17,17 +17,25 @@ limitations under the License. package v1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1" ) // The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. 
type SubjectAccessReviewExpansion interface { Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) } func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { result = &authorizationapi.SubjectAccessReview{} err = c.client.Post(). + Context(ctx). Resource("subjectaccessreviews"). Body(sar). Do(). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index 7f236f6e..88eac75b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/authorization/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go index bf1b8a5f..148cf628 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1beta1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" ) type LocalSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) } func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *localSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { result = &authorizationapi.LocalSubjectAccessReview{} err = c.client.Post(). + Context(ctx). Namespace(c.ns). Resource("localsubjectaccessreviews"). Body(sar). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go index 58fecfd8..6edead0e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1beta1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" ) type SelfSubjectAccessReviewExpansion interface { Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) } func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *selfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { result = &authorizationapi.SelfSubjectAccessReview{} err = c.client.Post(). + Context(ctx). Resource("selfsubjectaccessreviews"). Body(sar). Do(). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go index 5f1f37ef..a459d5c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go @@ -17,16 +17,24 @@ limitations under the License. package v1beta1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" ) type SelfSubjectRulesReviewExpansion interface { Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) + CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) } func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + return c.CreateContext(context.Background(), srr) +} + +func (c *selfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { result = &authorizationapi.SelfSubjectRulesReview{} err = c.client.Post(). + Context(ctx). Resource("selfsubjectrulesreviews"). Body(srr). Do(). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go index 4f93689e..7072e29c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go @@ -17,17 +17,25 @@ limitations under the License. 
package v1beta1 import ( + "context" + authorizationapi "k8s.io/api/authorization/v1beta1" ) // The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. type SubjectAccessReviewExpansion interface { Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) + CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) } func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + return c.CreateContext(context.Background(), sar) +} + +func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { result = &authorizationapi.SubjectAccessReview{} err = c.client.Post(). + Context(ctx). Resource("subjectaccessreviews"). Body(sar). Do(). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index 2bd49e2d..4f3e96ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/autoscaling/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 6891b6b6..0e0839fb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) ( // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.Hor // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOpt // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index 3a49b26b..c1a91fc3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -20,7 +20,6 @@ package v2beta1 import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 4ac8cce7..02d5cfb9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v2beta1 import ( + "time" + v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2beta1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go index 03fe25e4..bd2b3927 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go @@ -20,7 +20,6 @@ package v2beta2 import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index ddabda7e..91a0fa64 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v2beta2 import ( + "time" + v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2beta2.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index d5e35e6b..8dfc118a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/batch/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index ba8332a9..b55c602b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err // List takes label and field selectors, and returns the list of Jobs that match those selectors. func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.JobList{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { // Watch returns a watch.Interface that watches the requested jobs. func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } @@ -150,10 +162,15 @@ func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("jobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go index aa71ca83..25708535 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/batch/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index 04637c36..d89d2fa2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.Cron // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err e // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go index e6c6306b..d45c19d5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -20,7 +20,6 @@ package v2alpha1 import ( v2alpha1 "k8s.io/api/batch/v2alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index 4d922f9a..19123b60 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package v2alpha1 import ( + "time" + v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.Cro // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2alpha1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). 
Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index baac42ee..1c52d551 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/certificates/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index b39169a8..712d3a01 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (re // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.CertificateSigningRequestList{} err = c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1. // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("certificatesigningrequests"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go new file mode 100644 index 00000000..0df7b71b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/coordination/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoordinationV1Interface interface { + RESTClient() rest.Interface + LeasesGetter +} + +// CoordinationV1Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1Client struct { + restClient rest.Interface +} + +func (c *CoordinationV1Client) Leases(namespace string) LeaseInterface { + return newLeases(c, namespace) +} + +// NewForConfig creates a new CoordinationV1Client for the given config. +func NewForConfig(c *rest.Config) (*CoordinationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoordinationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoordinationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoordinationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoordinationV1Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1Client { + return &CoordinationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoordinationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go new file mode 100644 index 00000000..3af5d054 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go new file mode 100644 index 00000000..ab24f373 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type LeaseExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go new file mode 100644 index 00000000..b6cf1b64 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// LeasesGetter has a method to return a LeaseInterface. +// A group's client should implement this interface. +type LeasesGetter interface { + Leases(namespace string) LeaseInterface +} + +// LeaseInterface has methods to work with Lease resources. 
+type LeaseInterface interface { + Create(*v1.Lease) (*v1.Lease, error) + Update(*v1.Lease) (*v1.Lease, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Lease, error) + List(opts metav1.ListOptions) (*v1.LeaseList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) + LeaseExpansion +} + +// leases implements LeaseInterface +type leases struct { + client rest.Interface + ns string +} + +// newLeases returns a Leases +func newLeases(c *CoordinationV1Client, namespace string) *leases { + return &leases{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. +func (c *leases) Get(name string, options metav1.GetOptions) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Get(). + Namespace(c.ns). + Resource("leases"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Leases that match those selectors. +func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.LeaseList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested leases. +func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. +func (c *leases) Create(lease *v1.Lease) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Post(). + Namespace(c.ns). + Resource("leases"). + Body(lease). + Do(). + Into(result) + return +} + +// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. +func (c *leases) Update(lease *v1.Lease) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Put(). + Namespace(c.ns). + Resource("leases"). + Name(lease.Name). + Body(lease). + Do(). + Into(result) + return +} + +// Delete takes name of the lease and deletes it. Returns an error if one occurs. +func (c *leases) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("leases"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *leases) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("leases"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched lease. +func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("leases"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go index 91a76484..d68ed5d3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/coordination/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 16277255..490d815a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.LeaseList{} err = c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error // Watch returns a watch.Interface that watches the requested leases. func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *leases) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("leases"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index e497661c..302b2fdc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ComponentStatusList{} err = c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentS // Watch returns a watch.Interface that watches the requested componentStatuses. func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("componentstatuses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index 0984ae70..18ce954a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.Con // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. 
func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ConfigMapList{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, er // Watch returns a watch.Interface that watches the requested configMaps. func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index 044a28eb..428d2afa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/core/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -146,7 +145,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/api" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index dd821678..978a2a19 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endp // List takes label and field selectors, and returns the list of Endpoints that match those selectors. func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.EndpointsList{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -87,11 +94,16 @@ func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err // Watch returns a watch.Interface that watches the requested endpoints. func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 57d30f9f..55cfa090 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *events) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index 6929ade1..5a82afa4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -104,17 +104,17 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev if err != nil { return nil, err } - if e.ns != "" && ref.Namespace != e.ns { + if len(e.ns) > 0 && ref.Namespace != e.ns { return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) } stringRefKind := string(ref.Kind) var refKind *string - if stringRefKind != "" { + if len(stringRefKind) > 0 { refKind = &stringRefKind } stringRefUID := string(ref.UID) var refUID *string - if stringRefUID != "" { + if len(stringRefUID) > 0 { refUID = &stringRefUID } fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) @@ -124,10 +124,9 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev // Returns the appropriate field selector based on the API version being used to communicate with the server. // The returned field selector can be used with List and Watch to filter desired events. func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - apiVersion := e.client.APIVersion().String() field := fields.Set{} if involvedObjectName != nil { - field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + field["involvedObject.name"] = *involvedObjectName } if involvedObjectNamespace != nil { field["involvedObject.namespace"] = *involvedObjectNamespace @@ -142,6 +141,7 @@ func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, i } // Returns the appropriate field label to use for name of the involved object as per the given API version. +// DEPRECATED: please use "involvedObject.name" inline. func GetInvolvedObjectNameFieldLabel(version string) string { return "involvedObject.name" } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index 5b385668..2eeae11a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.Li // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.LimitRangeList{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, // Watch returns a watch.Interface that watches the requested limitRanges. 
func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index e22d07de..8a81fe85 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Nam // List takes label and field selectors, and returns the list of Namespaces that match those selectors. func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NamespaceList{} err = c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, er // Watch returns a watch.Interface that watches the requested namespaces. func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index 5c769c11..d19fab89 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, er // List takes label and field selectors, and returns the list of Nodes that match those selectors. func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NodeList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { // Watch returns a watch.Interface that watches the requested nodes. func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("nodes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index d5f19aef..74514825 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PersistentVolumeList{} err = c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.Persistent // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("persistentvolumes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index d32ae5df..410ab37d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PersistentVolumeClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.Persi // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index b19c5a5c..feacd307 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -44,6 +46,9 @@ type PodInterface interface { List(opts metav1.ListOptions) (*v1.PodList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + GetEphemeralContainers(podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error) + UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (*v1.EphemeralContainers, error) + PodExpansion } @@ -76,11 +81,16 @@ func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err // List takes label and field selectors, and returns the list of Pods that match those selectors. func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PodList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +98,16 @@ func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { // Watch returns a watch.Interface that watches the requested pods. func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +165,15 @@ func (c *pods) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("pods"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +192,31 @@ func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources Into(result) return } + +// GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any. +func (c *pods) GetEphemeralContainers(podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) { + result = &v1.EphemeralContainers{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. +func (c *pods) UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (result *v1.EphemeralContainers, err error) { + result = &v1.EphemeralContainers{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + Body(ephemeralContainers). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index d644e17d..84d7c980 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.P // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PodTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList // Watch returns a watch.Interface that watches the requested podTemplates. func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 17622f1c..dd3182db 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -19,8 +19,10 @@ limitations under the License. 
package v1 import ( + "time" + + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -45,8 +47,8 @@ type ReplicationControllerInterface interface { List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) - GetScale(replicationControllerName string, options metav1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) ReplicationControllerExpansion } @@ -80,11 +82,16 @@ func (c *replicationControllers) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ReplicationControllerList{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -92,11 +99,16 @@ func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.Repli // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -154,10 +166,15 @@ func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -177,9 +194,9 @@ func (c *replicationControllers) Patch(name string, pt types.PatchType, data []b return } -// GetScale takes name of the replicationController, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +// GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. 
+func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -192,8 +209,8 @@ func (c *replicationControllers) GetScale(replicationControllerName string, opti } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 8b74a404..5a178990 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ResourceQuotaList{} err = c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuota // Watch returns a watch.Interface that watches the requested resourceQuotas. func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 4ea9796b..85c143b1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret // List takes label and field selectors, and returns the list of Secrets that match those selectors. func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.SecretList{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err erro // Watch returns a watch.Interface that watches the requested secrets. func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("secrets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index 6c42ca87..b0e09413 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Servi // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ServiceList{} err = c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err er // Watch returns a watch.Interface that watches the requested services. 
func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index f3ab7eb8..50af6a21 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ServiceAccountList{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccou // Watch returns a watch.Interface that watches the requested serviceAccounts. func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go new file mode 100644 index 00000000..e65a0988 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/discovery/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type DiscoveryV1alpha1Interface interface { + RESTClient() rest.Interface + EndpointSlicesGetter +} + +// DiscoveryV1alpha1Client is used to interact with features provided by the discovery.k8s.io group. +type DiscoveryV1alpha1Client struct { + restClient rest.Interface +} + +func (c *DiscoveryV1alpha1Client) EndpointSlices(namespace string) EndpointSliceInterface { + return newEndpointSlices(c, namespace) +} + +// NewForConfig creates a new DiscoveryV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*DiscoveryV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &DiscoveryV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new DiscoveryV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *DiscoveryV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new DiscoveryV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *DiscoveryV1alpha1Client { + return &DiscoveryV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go new file mode 100644 index 00000000..df51baa4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go new file mode 100644 index 00000000..41751476 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/discovery/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EndpointSlicesGetter has a method to return a EndpointSliceInterface. +// A group's client should implement this interface. +type EndpointSlicesGetter interface { + EndpointSlices(namespace string) EndpointSliceInterface +} + +// EndpointSliceInterface has methods to work with EndpointSlice resources. +type EndpointSliceInterface interface { + Create(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) + Update(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.EndpointSlice, error) + List(opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) + EndpointSliceExpansion +} + +// endpointSlices implements EndpointSliceInterface +type endpointSlices struct { + client rest.Interface + ns string +} + +// newEndpointSlices returns a EndpointSlices +func newEndpointSlices(c *DiscoveryV1alpha1Client, namespace string) *endpointSlices { + return &endpointSlices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *endpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.EndpointSliceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). 
+ Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpointslices"). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpointslices"). + Name(endpointSlice.Name). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpointslices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go new file mode 100644 index 00000000..e8ceb59a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +type EndpointSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go new file mode 100644 index 00000000..997239a9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/discovery/v1beta1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type DiscoveryV1beta1Interface interface { + RESTClient() rest.Interface + EndpointSlicesGetter +} + +// DiscoveryV1beta1Client is used to interact with features provided by the discovery.k8s.io group. +type DiscoveryV1beta1Client struct { + restClient rest.Interface +} + +func (c *DiscoveryV1beta1Client) EndpointSlices(namespace string) EndpointSliceInterface { + return newEndpointSlices(c, namespace) +} + +// NewForConfig creates a new DiscoveryV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*DiscoveryV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &DiscoveryV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new DiscoveryV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *DiscoveryV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new DiscoveryV1beta1Client for the given RESTClient. +func New(c rest.Interface) *DiscoveryV1beta1Client { + return &DiscoveryV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go new file mode 100644 index 00000000..77110195 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go new file mode 100644 index 00000000..bba656b8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/discovery/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EndpointSlicesGetter has a method to return a EndpointSliceInterface. +// A group's client should implement this interface. +type EndpointSlicesGetter interface { + EndpointSlices(namespace string) EndpointSliceInterface +} + +// EndpointSliceInterface has methods to work with EndpointSlice resources. +type EndpointSliceInterface interface { + Create(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error) + Update(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.EndpointSlice, error) + List(opts v1.ListOptions) (*v1beta1.EndpointSliceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) + EndpointSliceExpansion +} + +// endpointSlices implements EndpointSliceInterface +type endpointSlices struct { + client rest.Interface + ns string +} + +// newEndpointSlices returns a EndpointSlices +func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices { + return &endpointSlices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *endpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.EndpointSliceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpointslices"). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpointslices"). + Name(endpointSlice.Name). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) { + result = &v1beta1.EndpointSlice{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpointslices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go new file mode 100644 index 00000000..1e7769f2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type EndpointSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index af7d060d..143281b2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *events) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go new file mode 100644 index 00000000..312ee428 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/types" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +// TODO: Add querying functions to the event expansion +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create + // except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) + // UpdateWithEventNamespace is the same as a Update + // except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) + // PatchWithEventNamespace is the same as an Update + // except that it sends the request to the event.Namespace. + PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) +} + +// CreateWithEventNamespace makes a new event. +// Returns the copy of the event the server returns, or an error. +// The namespace to create the event within is deduced from the event. +// it must either match this event client's namespace, or this event client must +// have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. +// It returns the copy of the event that the server returns, or an error. +// The namespace and key to update the event within is deduced from the event. +// The namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. +// Update also requires the ResourceVersion to be set in the event object. +func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Put(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// PatchWithEventNamespace modifies an existing event. 
+// It returns the copy of the event that the server returns, or an error. +// The namespace and name of the target event is deduced from the event. +// The namespace must either match this event client's namespace, or this event client must +// have been created with the "" namespace. +func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1beta1.Event{} + err := e.client.Patch(types.StrategicMergePatchType). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(data). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go index fb59635b..e372ccff 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/events/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go index e27f693f..f6df7696 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -17,5 +17,3 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. package v1beta1 - -type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index 85294be4..93b1ae9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 89183d28..5557b9f2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -91,11 +98,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 1961ffc7..e3b22aa4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,9 +29,9 @@ type ExtensionsV1beta1Interface interface { DaemonSetsGetter DeploymentsGetter IngressesGetter + NetworkPoliciesGetter PodSecurityPoliciesGetter ReplicaSetsGetter - ScalesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. @@ -52,6 +51,10 @@ func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } +func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolicyInterface { + return newNetworkPolicies(c, namespace) +} + func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { return newPodSecurityPolicies(c) } @@ -60,10 +63,6 @@ func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterf return newReplicaSets(c, namespace) } -func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - // NewForConfig creates a new ExtensionsV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c @@ -96,7 +95,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index cfaeebd0..41d28f04 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -22,6 +22,8 @@ type DaemonSetExpansion interface{} type IngressExpansion interface{} +type NetworkPolicyExpansion interface{} + type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index f8b664cb..4da51c36 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ing // List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
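The hunks in this part of the patch all thread the same change through the typed clients: when ListOptions.TimeoutSeconds is set, the generated List, Watch and DeleteCollection calls now also apply it as a client-side request timeout via Timeout(...). Callers need nothing new beyond setting the field; a hedged sketch against the extensions/v1beta1 Deployments client updated above, with the namespace and value purely illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client, err := extensionsv1beta1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// With this patch, the 30s server-side timeout below is also honoured as a
	// 30s client-side request timeout by the generated List implementation.
	timeout := int64(30)
	deployments, err := client.Deployments("default").List(metav1.ListOptions{
		TimeoutSeconds: &timeout,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("deployments:", len(deployments.Items))
}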
func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.IngressList{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err // Watch returns a watch.Interface that watches the requested ingresses. func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go new file mode 100644 index 00000000..0607e2dd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. +// A group's client should implement this interface. +type NetworkPoliciesGetter interface { + NetworkPolicies(namespace string) NetworkPolicyInterface +} + +// NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
+type NetworkPolicyInterface interface { + Create(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) + Update(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.NetworkPolicy, error) + List(opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) + NetworkPolicyExpansion +} + +// networkPolicies implements NetworkPolicyInterface +type networkPolicies struct { + client rest.Interface + ns string +} + +// newNetworkPolicies returns a NetworkPolicies +func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { + return &networkPolicies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. +func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. +func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.NetworkPolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networkPolicies. +func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Create(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("networkpolicies"). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Update(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(networkPolicy.Name). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. 
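This snapshot also adds an extensions/v1beta1 NetworkPolicy client alongside the networking/v1 one updated later in the patch. Its Patch method, defined just below, takes the usual (name, patch type, raw bytes) triple; a hedged sketch of a strategic-merge label patch, where the policy name and label are placeholders:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client, err := extensionsv1beta1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Strategic merge patch adding a label; "allow-dns" is a placeholder name.
	patch := []byte(`{"metadata":{"labels":{"team":"example"}}}`)
	np, err := client.NetworkPolicies("default").Patch("allow-dns", types.StrategicMergePatchType, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println("patched:", np.Name, np.Labels)
}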
+func (c *networkPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched networkPolicy. +func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { + result = &v1beta1.NetworkPolicy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("networkpolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index 8099d773..a947a54a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 7e61fa2d..44402905 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -91,11 +98,16 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go deleted file mode 100644 index 6ee677ac..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. 
-type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go deleted file mode 100644 index c9733cb2..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. -type ScaleExpansion interface { - Get(kind string, name string) (*v1beta1.Scale, error) - Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) -} - -// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Get(). - Namespace(c.ns). - Resource(resource.Resource). - Name(name). - SubResource("scale"). - Do(). - Into(result) - return -} - -func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(resource.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go new file mode 100644 index 00000000..df51baa4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go new file mode 100644 index 00000000..37a1ff2d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1alpha1Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1alpha1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1alpha1Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1alpha1Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1alpha1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &FlowcontrolV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1alpha1Client { + return &FlowcontrolV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
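The flowcontrol.apiserver.k8s.io/v1alpha1 client added here serves cluster-scoped resources, so FlowSchemas() and PriorityLevelConfigurations() take no namespace argument. A minimal sketch of listing flow schemas against an API server that actually serves this alpha group (credentials and names illustrative):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client, err := flowcontrolv1alpha1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Cluster-scoped resource: no namespace, just ListOptions as elsewhere in
	// this client-go snapshot.
	schemas, err := client.FlowSchemas().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, fs := range schemas.Items {
		fmt.Println(fs.Name)
	}
}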
+func (c *FlowcontrolV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go new file mode 100644 index 00000000..db71d29a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. +type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. +type FlowSchemaInterface interface { + Create(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) + Update(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) + UpdateStatus(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.FlowSchema, error) + List(opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + client rest.Interface +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1alpha1Client) *flowSchemas { + return &flowSchemas{ + client: c.RESTClient(), + } +} + +// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. +func (c *flowSchemas) Get(name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Get(). + Resource("flowschemas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. +func (c *flowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.FlowSchemaList{} + err = c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested flowSchemas. +func (c *flowSchemas) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("flowschemas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Create(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Post(). + Resource("flowschemas"). + Body(flowSchema). + Do(). + Into(result) + return +} + +// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. +func (c *flowSchemas) Update(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + Body(flowSchema). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *flowSchemas) UpdateStatus(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Put(). + Resource("flowschemas"). + Name(flowSchema.Name). + SubResource("status"). + Body(flowSchema). + Do(). + Into(result) + return +} + +// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. +func (c *flowSchemas) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("flowschemas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *flowSchemas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("flowschemas"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched flowSchema. +func (c *flowSchemas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} + err = c.client.Patch(pt). + Resource("flowschemas"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go new file mode 100644 index 00000000..065b5e6b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type FlowSchemaExpansion interface{} + +type PriorityLevelConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go new file mode 100644 index 00000000..eb99cca6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. +type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
+type PriorityLevelConfigurationInterface interface { + Create(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) + Update(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) + UpdateStatus(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) + List(opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + client rest.Interface +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1alpha1Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. +func (c *priorityLevelConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. +func (c *priorityLevelConfigurations) List(opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PriorityLevelConfigurationList{} + err = c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. +func (c *priorityLevelConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("prioritylevelconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Create(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Post(). + Resource("prioritylevelconfigurations"). + Body(priorityLevelConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityLevelConfiguration and updates it. 
Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. +func (c *priorityLevelConfigurations) Update(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + Body(priorityLevelConfiguration). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *priorityLevelConfigurations) UpdateStatus(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Put(). + Resource("prioritylevelconfigurations"). + Name(priorityLevelConfiguration.Name). + SubResource("status"). + Body(priorityLevelConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. +func (c *priorityLevelConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityLevelConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("prioritylevelconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityLevelConfiguration. +func (c *priorityLevelConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} + err = c.client.Patch(pt). + Resource("prioritylevelconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index 8684db45..5315d9b9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index d8f0a6b4..3f39be95 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NetworkPolicyList{} err = c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolic // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go new file mode 100644 index 00000000..77110195 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go new file mode 100644 index 00000000..1442649b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type IngressExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go new file mode 100644 index 00000000..8d76678f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// IngressesGetter has a method to return a IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. 
+type IngressInterface interface { + Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) + UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) + List(opts v1.ListOptions) (*v1beta1.IngressList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + client rest.Interface + ns string +} + +// newIngresses returns a Ingresses +func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { + return &ingresses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresses"). + Body(ingress). + Do(). + Into(result) + return +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). 
+ SubResource("status"). + Body(ingress). + Do(). + Into(result) + return +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched ingress. +func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ingresses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go new file mode 100644 index 00000000..ee523f8e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1beta1Interface interface { + RESTClient() rest.Interface + IngressesGetter +} + +// NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. +type NetworkingV1beta1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +// NewForConfig creates a new NetworkingV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NetworkingV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *NetworkingV1beta1Client { + return &NetworkingV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go new file mode 100644 index 00000000..df51baa4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go new file mode 100644 index 00000000..fcef31d1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go new file mode 100644 index 00000000..e7acc27e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/node/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1alpha1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1alpha1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1alpha1Client struct { + restClient rest.Interface +} + +func (c *NodeV1alpha1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1alpha1Client { + return &NodeV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go new file mode 100644 index 00000000..044460ec --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/node/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. 
+type RuntimeClassInterface interface { + Create(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) + Update(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error) + List(opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go new file mode 100644 index 00000000..77110195 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go new file mode 100644 index 00000000..669dd028 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go new file mode 100644 index 00000000..b38d9aca --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/node/v1beta1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1beta1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1beta1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1beta1Client struct { + restClient rest.Interface +} + +func (c *NodeV1beta1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1beta1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1beta1Client { + return &NodeV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go new file mode 100644 index 00000000..b3f7c497 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/node/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. +type RuntimeClassInterface interface { + Create(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) + Update(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error) + List(opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
+func (c *runtimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index a11f27eb..864af9a2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result * // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodDisruptionBudgetList{} err = c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDis // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) er // DeleteCollection deletes a collection of objects. 
func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go index 355be1e9..d02096d7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 020e185e..8b8b22c6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/policy/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -81,7 +80,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index c4299d4c..0a47c441 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.C // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 30c0469a..c16ebc31 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterR // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index e3855bb9..1bc0179c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -20,7 +20,6 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index 81ea12a9..a17d791f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, er // List takes label and field selectors, and returns the list of Roles that match those selectors. 
func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index 17c6f991..c87e4571 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.R // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. 
func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index 37a54576..77e66877 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleLi // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 60507890..0d1b9d20 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cluste // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index de83531e..efbbc68b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index aa6954bb..4a4b6724 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 0941b8e8..bf4e5a10 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingLi // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index bac951c8..21d3cab3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleLis // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index 96c91de6..47eb9e4e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Cluster // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 46718d73..4db94cfa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index 66f382c0..2b61aad5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, e // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. 
func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 67d3d331..0bd118fd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingLis // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go new file mode 100644 index 00000000..3af5d054 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go new file mode 100644 index 00000000..cc321329 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go new file mode 100644 index 00000000..3abbb7b8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/scheduling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityClassesGetter has a method to return a PriorityClassInterface. +// A group's client should implement this interface. +type PriorityClassesGetter interface { + PriorityClasses() PriorityClassInterface +} + +// PriorityClassInterface has methods to work with PriorityClass resources. 
+type PriorityClassInterface interface { + Create(*v1.PriorityClass) (*v1.PriorityClass, error) + Update(*v1.PriorityClass) (*v1.PriorityClass, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.PriorityClass, error) + List(opts metav1.ListOptions) (*v1.PriorityClassList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) + PriorityClassExpansion +} + +// priorityClasses implements PriorityClassInterface +type priorityClasses struct { + client rest.Interface +} + +// newPriorityClasses returns a PriorityClasses +func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { + return &priorityClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *priorityClasses) Get(name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Get(). + Resource("priorityclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. +func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PriorityClassList{} + err = c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. +func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Create(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Post(). + Resource("priorityclasses"). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Update(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Put(). + Resource("priorityclasses"). + Name(priorityClass.Name). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *priorityClasses) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *priorityClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("priorityclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Patch(pt). + Resource("priorityclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go new file mode 100644 index 00000000..5028bac8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/scheduling/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1Interface interface { + RESTClient() rest.Interface + PriorityClassesGetter +} + +// SchedulingV1Client is used to interact with features provided by the scheduling.k8s.io group. +type SchedulingV1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1Client) PriorityClasses() PriorityClassInterface { + return newPriorityClasses(c) +} + +// NewForConfig creates a new SchedulingV1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1Client for the given RESTClient. +func New(c rest.Interface) *SchedulingV1Client { + return &SchedulingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
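Two related changes recur in the *_client.go hunks: setConfigDefaults now selects scheme.Codecs.WithoutConversion() in place of the older serializer.DirectCodecFactory wrapper (both choose a codec factory that skips internal-version conversion), and a typed client for the GA scheduling.k8s.io/v1 group is generated. A minimal usage sketch follows; it is illustrative only, assumes the program runs in-cluster, and assumes the aggregate clientset at this client-go level exposes SchedulingV1().

// Illustrative sketch; not part of the vendored patch.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: running inside a cluster
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// TimeoutSeconds feeds the new Timeout(...) call added in the hunks above.
	secs := int64(10)
	pcs, err := cs.SchedulingV1().PriorityClasses().List(metav1.ListOptions{TimeoutSeconds: &secs})
	if err != nil {
		panic(err)
	}
	for _, pc := range pcs.Items {
		fmt.Println(pc.Name, pc.Value)
	}
}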
+func (c *SchedulingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index 6845d25c..29d646fb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alp // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityCl // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 375f41b8..83bc0b8a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/scheduling/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 57b9766e..5e402f8e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1bet // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityCla // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go index 6feec4ae..373f5cca 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -20,7 +20,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/scheduling/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go index f000ae48..8fd6adc5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.P // List takes label and field selectors, and returns the list of PodPresets that match those selectors. func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PodPresetList{} err = c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, // Watch returns a watch.Interface that watches the requested podPresets. func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go index c2a03b96..8d3a8d8e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/settings/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go new file mode 100644 index 00000000..20905417 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CSINodesGetter has a method to return a CSINodeInterface. +// A group's client should implement this interface. +type CSINodesGetter interface { + CSINodes() CSINodeInterface +} + +// CSINodeInterface has methods to work with CSINode resources. +type CSINodeInterface interface { + Create(*v1.CSINode) (*v1.CSINode, error) + Update(*v1.CSINode) (*v1.CSINode, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.CSINode, error) + List(opts metav1.ListOptions) (*v1.CSINodeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error) + CSINodeExpansion +} + +// cSINodes implements CSINodeInterface +type cSINodes struct { + client rest.Interface +} + +// newCSINodes returns a CSINodes +func newCSINodes(c *StorageV1Client) *cSINodes { + return &cSINodes{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. +func (c *cSINodes) Get(name string, options metav1.GetOptions) (result *v1.CSINode, err error) { + result = &v1.CSINode{} + err = c.client.Get(). + Resource("csinodes"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSINodes that match those selectors. +func (c *cSINodes) List(opts metav1.ListOptions) (result *v1.CSINodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CSINodeList{} + err = c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSINodes. +func (c *cSINodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Create(cSINode *v1.CSINode) (result *v1.CSINode, err error) { + result = &v1.CSINode{} + err = c.client.Post(). + Resource("csinodes"). + Body(cSINode). + Do(). + Into(result) + return +} + +// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Update(cSINode *v1.CSINode) (result *v1.CSINode, err error) { + result = &v1.CSINode{} + err = c.client.Put(). + Resource("csinodes"). + Name(cSINode.Name). + Body(cSINode). + Do(). + Into(result) + return +} + +// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. +func (c *cSINodes) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("csinodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSINodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csinodes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cSINode. +func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error) { + result = &v1.CSINode{} + err = c.client.Patch(pt). + Resource("csinodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index 2bea7ec7..d147620a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -18,4 +18,8 @@ limitations under the License. 
package v1 +type CSINodeExpansion interface{} + type StorageClassExpansion interface{} + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index ac48f491..822f0891 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -20,14 +20,15 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) type StorageV1Interface interface { RESTClient() rest.Interface + CSINodesGetter StorageClassesGetter + VolumeAttachmentsGetter } // StorageV1Client is used to interact with features provided by the storage.k8s.io group. @@ -35,10 +36,18 @@ type StorageV1Client struct { restClient rest.Interface } +func (c *StorageV1Client) CSINodes() CSINodeInterface { + return newCSINodes(c) +} + func (c *StorageV1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } +func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + // NewForConfig creates a new StorageV1Client for the given config. func NewForConfig(c *rest.Config) (*StorageV1Client, error) { config := *c @@ -71,7 +80,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 0f7f57f0..3f4c48f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassL // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } @@ -125,9 +137,14 @@ func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go new file mode 100644 index 00000000..0f45097b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. +type VolumeAttachmentInterface interface { + Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error) + List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). 
+ SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index c52f630a..32d50306 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -20,7 +20,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -71,7 +70,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index e6af0018..7fef94e8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1a // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go new file mode 100644 index 00000000..86cf9bf1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CSIDriversGetter has a method to return a CSIDriverInterface. +// A group's client should implement this interface. +type CSIDriversGetter interface { + CSIDrivers() CSIDriverInterface +} + +// CSIDriverInterface has methods to work with CSIDriver resources. +type CSIDriverInterface interface { + Create(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) + Update(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CSIDriver, error) + List(opts v1.ListOptions) (*v1beta1.CSIDriverList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) + CSIDriverExpansion +} + +// cSIDrivers implements CSIDriverInterface +type cSIDrivers struct { + client rest.Interface +} + +// newCSIDrivers returns a CSIDrivers +func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { + return &cSIDrivers{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. +func (c *cSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Get(). + Resource("csidrivers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. +func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CSIDriverList{} + err = c.client.Get(). + Resource("csidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSIDrivers. 
+func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. +func (c *cSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Post(). + Resource("csidrivers"). + Body(cSIDriver). + Do(). + Into(result) + return +} + +// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. +func (c *cSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Put(). + Resource("csidrivers"). + Name(cSIDriver.Name). + Body(cSIDriver). + Do(). + Into(result) + return +} + +// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. +func (c *cSIDrivers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("csidrivers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csidrivers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cSIDriver. +func (c *cSIDrivers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Patch(pt). + Resource("csidrivers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go new file mode 100644 index 00000000..e5540c12 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CSINodesGetter has a method to return a CSINodeInterface. +// A group's client should implement this interface. +type CSINodesGetter interface { + CSINodes() CSINodeInterface +} + +// CSINodeInterface has methods to work with CSINode resources. +type CSINodeInterface interface { + Create(*v1beta1.CSINode) (*v1beta1.CSINode, error) + Update(*v1beta1.CSINode) (*v1beta1.CSINode, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CSINode, error) + List(opts v1.ListOptions) (*v1beta1.CSINodeList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) + CSINodeExpansion +} + +// cSINodes implements CSINodeInterface +type cSINodes struct { + client rest.Interface +} + +// newCSINodes returns a CSINodes +func newCSINodes(c *StorageV1beta1Client) *cSINodes { + return &cSINodes{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. +func (c *cSINodes) Get(name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Get(). + Resource("csinodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSINodes that match those selectors. +func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CSINodeList{} + err = c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSINodes. +func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Post(). + Resource("csinodes"). + Body(cSINode). + Do(). + Into(result) + return +} + +// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Put(). + Resource("csinodes"). + Name(cSINode.Name). + Body(cSINode). + Do(). + Into(result) + return +} + +// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. 
+func (c *cSINodes) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("csinodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csinodes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cSINode. +func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Patch(pt). + Resource("csinodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 559f88f6..7ba93142 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. package v1beta1 +type CSIDriverExpansion interface{} + +type CSINodeExpansion interface{} + type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 4bdebb87..5e12b025 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -20,13 +20,14 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) type StorageV1beta1Interface interface { RESTClient() rest.Interface + CSIDriversGetter + CSINodesGetter StorageClassesGetter VolumeAttachmentsGetter } @@ -36,6 +37,14 @@ type StorageV1beta1Client struct { restClient rest.Interface } +func (c *StorageV1beta1Client) CSIDrivers() CSIDriverInterface { + return newCSIDrivers(c) +} + +func (c *StorageV1beta1Client) CSINodes() CSINodeInterface { + return newCSINodes(c) +} + func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } @@ -76,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index fbe1fd4c..8a8f3891 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClass // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 5cd2d391..d319407f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1b // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAtt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go new file mode 100644 index 00000000..e121ae41 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to +// MutatingWebhookConfigurationLister. +type MutatingWebhookConfigurationListerExpansion interface{} + +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to +// ValidatingWebhookConfigurationLister. +type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..e2b5da09 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. +type MutatingWebhookConfigurationLister interface { + // List lists all MutatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) + // Get retrieves the MutatingWebhookConfiguration from the index for a given name. 
+ Get(name string) (*v1.MutatingWebhookConfiguration, error) + MutatingWebhookConfigurationListerExpansion +} + +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. +type mutatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { + return &mutatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all MutatingWebhookConfigurations in the indexer. +func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.MutatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the MutatingWebhookConfiguration from the index for a given name. +func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1.MutatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("mutatingwebhookconfiguration"), name) + } + return obj.(*v1.MutatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..33d55e08 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. +type ValidatingWebhookConfigurationLister interface { + // List lists all ValidatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) + // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1.ValidatingWebhookConfiguration, error) + ValidatingWebhookConfigurationListerExpansion +} + +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. +type validatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { + return &validatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all ValidatingWebhookConfigurations in the indexer. 
+func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ValidatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. +func (s *validatingWebhookConfigurationLister) Get(name string) (*v1.ValidatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("validatingwebhookconfiguration"), name) + } + return obj.(*v1.ValidatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go deleted file mode 100644 index dbd7301f..00000000 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// InitializerConfigurationLister helps list InitializerConfigurations. -type InitializerConfigurationLister interface { - // List lists all InitializerConfigurations in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.InitializerConfiguration, err error) - // Get retrieves the InitializerConfiguration from the index for a given name. - Get(name string) (*v1alpha1.InitializerConfiguration, error) - InitializerConfigurationListerExpansion -} - -// initializerConfigurationLister implements the InitializerConfigurationLister interface. -type initializerConfigurationLister struct { - indexer cache.Indexer -} - -// NewInitializerConfigurationLister returns a new InitializerConfigurationLister. -func NewInitializerConfigurationLister(indexer cache.Indexer) InitializerConfigurationLister { - return &initializerConfigurationLister{indexer: indexer} -} - -// List lists all InitializerConfigurations in the indexer. -func (s *initializerConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.InitializerConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.InitializerConfiguration)) - }) - return ret, err -} - -// Get retrieves the InitializerConfiguration from the index for a given name. 
-func (s *initializerConfigurationLister) Get(name string) (*v1alpha1.InitializerConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("initializerconfiguration"), name) - } - return obj.(*v1alpha1.InitializerConfiguration), nil -} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go index 8f8d0843..c73cf98c 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go @@ -33,11 +33,3 @@ type DeploymentListerExpansion interface{} // DeploymentNamespaceListerExpansion allows custom methods to be added to // DeploymentNamespaceLister. type DeploymentNamespaceListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go deleted file mode 100644 index ef8a2630..00000000 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. -func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. 
- List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. -func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) - } - return obj.(*v1beta1.Scale), nil -} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go index d468f38e..bac6ccb9 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go @@ -25,11 +25,3 @@ type ControllerRevisionListerExpansion interface{} // ControllerRevisionNamespaceListerExpansion allows custom methods to be added to // ControllerRevisionNamespaceLister. type ControllerRevisionNamespaceListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go deleted file mode 100644 index d8932986..00000000 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta2.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. 
-func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. -func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta2.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta2.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. -func (s scaleNamespaceLister) Get(name string) (*v1beta2.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("scale"), name) - } - return obj.(*v1beta2.Scale), nil -} diff --git a/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 00000000..3ae4528c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// AuditSinkLister helps list AuditSinks. +type AuditSinkLister interface { + // List lists all AuditSinks in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.AuditSink, err error) + // Get retrieves the AuditSink from the index for a given name. + Get(name string) (*v1alpha1.AuditSink, error) + AuditSinkListerExpansion +} + +// auditSinkLister implements the AuditSinkLister interface. +type auditSinkLister struct { + indexer cache.Indexer +} + +// NewAuditSinkLister returns a new AuditSinkLister. 
+func NewAuditSinkLister(indexer cache.Indexer) AuditSinkLister { + return &auditSinkLister{indexer: indexer} +} + +// List lists all AuditSinks in the indexer. +func (s *auditSinkLister) List(selector labels.Selector) (ret []*v1alpha1.AuditSink, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.AuditSink)) + }) + return ret, err +} + +// Get retrieves the AuditSink from the index for a given name. +func (s *auditSinkLister) Get(name string) (*v1alpha1.AuditSink, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("auditsink"), name) + } + return obj.(*v1alpha1.AuditSink), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go similarity index 78% rename from vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go rename to vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go index 2c9f9f6a..533dd063 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go @@ -18,6 +18,6 @@ limitations under the License. package v1alpha1 -// InitializerConfigurationListerExpansion allows custom methods to be added to -// InitializerConfigurationLister. -type InitializerConfigurationListerExpansion interface{} +// AuditSinkListerExpansion allows custom methods to be added to +// AuditSinkLister. +type AuditSinkListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go new file mode 100644 index 00000000..ddc494f1 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// LeaseListerExpansion allows custom methods to be added to +// LeaseLister. +type LeaseListerExpansion interface{} + +// LeaseNamespaceListerExpansion allows custom methods to be added to +// LeaseNamespaceLister. +type LeaseNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go new file mode 100644 index 00000000..cc379088 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/coordination/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// LeaseLister helps list Leases. +type LeaseLister interface { + // List lists all Leases in the indexer. + List(selector labels.Selector) (ret []*v1.Lease, err error) + // Leases returns an object that can list and get Leases. + Leases(namespace string) LeaseNamespaceLister + LeaseListerExpansion +} + +// leaseLister implements the LeaseLister interface. +type leaseLister struct { + indexer cache.Indexer +} + +// NewLeaseLister returns a new LeaseLister. +func NewLeaseLister(indexer cache.Indexer) LeaseLister { + return &leaseLister{indexer: indexer} +} + +// List lists all Leases in the indexer. +func (s *leaseLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Lease)) + }) + return ret, err +} + +// Leases returns an object that can list and get Leases. +func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { + return leaseNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// LeaseNamespaceLister helps list and get Leases. +type LeaseNamespaceLister interface { + // List lists all Leases in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Lease, err error) + // Get retrieves the Lease from the indexer for a given namespace and name. + Get(name string) (*v1.Lease, error) + LeaseNamespaceListerExpansion +} + +// leaseNamespaceLister implements the LeaseNamespaceLister +// interface. +type leaseNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Leases in the indexer for a given namespace. +func (s leaseNamespaceLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Lease)) + }) + return ret, err +} + +// Get retrieves the Lease from the indexer for a given namespace and name. +func (s leaseNamespaceLister) Get(name string) (*v1.Lease, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("lease"), name) + } + return obj.(*v1.Lease), nil +} diff --git a/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go new file mode 100644 index 00000000..706beecf --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/discovery/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EndpointSliceLister helps list EndpointSlices. +type EndpointSliceLister interface { + // List lists all EndpointSlices in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) + // EndpointSlices returns an object that can list and get EndpointSlices. + EndpointSlices(namespace string) EndpointSliceNamespaceLister + EndpointSliceListerExpansion +} + +// endpointSliceLister implements the EndpointSliceLister interface. +type endpointSliceLister struct { + indexer cache.Indexer +} + +// NewEndpointSliceLister returns a new EndpointSliceLister. +func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { + return &endpointSliceLister{indexer: indexer} +} + +// List lists all EndpointSlices in the indexer. +func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.EndpointSlice)) + }) + return ret, err +} + +// EndpointSlices returns an object that can list and get EndpointSlices. +func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { + return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointSliceNamespaceLister helps list and get EndpointSlices. +type EndpointSliceNamespaceLister interface { + // List lists all EndpointSlices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) + // Get retrieves the EndpointSlice from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.EndpointSlice, error) + EndpointSliceNamespaceListerExpansion +} + +// endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister +// interface. +type endpointSliceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all EndpointSlices in the indexer for a given namespace. +func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.EndpointSlice)) + }) + return ret, err +} + +// Get retrieves the EndpointSlice from the indexer for a given namespace and name. 
+func (s endpointSliceNamespaceLister) Get(name string) (*v1alpha1.EndpointSlice, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("endpointslice"), name) + } + return obj.(*v1alpha1.EndpointSlice), nil +} diff --git a/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..d47af59a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// EndpointSliceListerExpansion allows custom methods to be added to +// EndpointSliceLister. +type EndpointSliceListerExpansion interface{} + +// EndpointSliceNamespaceListerExpansion allows custom methods to be added to +// EndpointSliceNamespaceLister. +type EndpointSliceNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go new file mode 100644 index 00000000..e7d1026a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/discovery/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EndpointSliceLister helps list EndpointSlices. +type EndpointSliceLister interface { + // List lists all EndpointSlices in the indexer. + List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) + // EndpointSlices returns an object that can list and get EndpointSlices. + EndpointSlices(namespace string) EndpointSliceNamespaceLister + EndpointSliceListerExpansion +} + +// endpointSliceLister implements the EndpointSliceLister interface. +type endpointSliceLister struct { + indexer cache.Indexer +} + +// NewEndpointSliceLister returns a new EndpointSliceLister. +func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { + return &endpointSliceLister{indexer: indexer} +} + +// List lists all EndpointSlices in the indexer. 
+func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.EndpointSlice)) + }) + return ret, err +} + +// EndpointSlices returns an object that can list and get EndpointSlices. +func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { + return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointSliceNamespaceLister helps list and get EndpointSlices. +type EndpointSliceNamespaceLister interface { + // List lists all EndpointSlices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) + // Get retrieves the EndpointSlice from the indexer for a given namespace and name. + Get(name string) (*v1beta1.EndpointSlice, error) + EndpointSliceNamespaceListerExpansion +} + +// endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister +// interface. +type endpointSliceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all EndpointSlices in the indexer for a given namespace. +func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.EndpointSlice)) + }) + return ret, err +} + +// Get retrieves the EndpointSlice from the indexer for a given namespace and name. +func (s endpointSliceNamespaceLister) Get(name string) (*v1beta1.EndpointSlice, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("endpointslice"), name) + } + return obj.(*v1beta1.EndpointSlice), nil +} diff --git a/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go new file mode 100644 index 00000000..9619bbd8 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// EndpointSliceListerExpansion allows custom methods to be added to +// EndpointSliceLister. +type EndpointSliceListerExpansion interface{} + +// EndpointSliceNamespaceListerExpansion allows custom methods to be added to +// EndpointSliceNamespaceLister. 
+type EndpointSliceNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index b5ee8a49..6d55ae9b 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -26,14 +26,14 @@ type IngressListerExpansion interface{} // IngressNamespaceLister. type IngressNamespaceListerExpansion interface{} +// NetworkPolicyListerExpansion allows custom methods to be added to +// NetworkPolicyLister. +type NetworkPolicyListerExpansion interface{} + +// NetworkPolicyNamespaceListerExpansion allows custom methods to be added to +// NetworkPolicyNamespaceLister. +type NetworkPolicyNamespaceListerExpansion interface{} + // PodSecurityPolicyListerExpansion allows custom methods to be added to // PodSecurityPolicyLister. type PodSecurityPolicyListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go new file mode 100644 index 00000000..782f521a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NetworkPolicyLister helps list NetworkPolicies. +type NetworkPolicyLister interface { + // List lists all NetworkPolicies in the indexer. + List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) + // NetworkPolicies returns an object that can list and get NetworkPolicies. + NetworkPolicies(namespace string) NetworkPolicyNamespaceLister + NetworkPolicyListerExpansion +} + +// networkPolicyLister implements the NetworkPolicyLister interface. +type networkPolicyLister struct { + indexer cache.Indexer +} + +// NewNetworkPolicyLister returns a new NetworkPolicyLister. +func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { + return &networkPolicyLister{indexer: indexer} +} + +// List lists all NetworkPolicies in the indexer. +func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.NetworkPolicy)) + }) + return ret, err +} + +// NetworkPolicies returns an object that can list and get NetworkPolicies. 
+func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { + return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// NetworkPolicyNamespaceLister helps list and get NetworkPolicies. +type NetworkPolicyNamespaceLister interface { + // List lists all NetworkPolicies in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) + // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. + Get(name string) (*v1beta1.NetworkPolicy, error) + NetworkPolicyNamespaceListerExpansion +} + +// networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister +// interface. +type networkPolicyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all NetworkPolicies in the indexer for a given namespace. +func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.NetworkPolicy)) + }) + return ret, err +} + +// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. +func (s networkPolicyNamespaceLister) Get(name string) (*v1beta1.NetworkPolicy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("networkpolicy"), name) + } + return obj.(*v1beta1.NetworkPolicy), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go deleted file mode 100644 index 527d4be4..00000000 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. 
-func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. -func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) - } - return obj.(*v1beta1.Scale), nil -} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..3e740516 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// FlowSchemaListerExpansion allows custom methods to be added to +// FlowSchemaLister. +type FlowSchemaListerExpansion interface{} + +// PriorityLevelConfigurationListerExpansion allows custom methods to be added to +// PriorityLevelConfigurationLister. +type PriorityLevelConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go new file mode 100644 index 00000000..b6791336 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// FlowSchemaLister helps list FlowSchemas. +type FlowSchemaLister interface { + // List lists all FlowSchemas in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.FlowSchema, err error) + // Get retrieves the FlowSchema from the index for a given name. + Get(name string) (*v1alpha1.FlowSchema, error) + FlowSchemaListerExpansion +} + +// flowSchemaLister implements the FlowSchemaLister interface. +type flowSchemaLister struct { + indexer cache.Indexer +} + +// NewFlowSchemaLister returns a new FlowSchemaLister. +func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { + return &flowSchemaLister{indexer: indexer} +} + +// List lists all FlowSchemas in the indexer. +func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1alpha1.FlowSchema, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.FlowSchema)) + }) + return ret, err +} + +// Get retrieves the FlowSchema from the index for a given name. +func (s *flowSchemaLister) Get(name string) (*v1alpha1.FlowSchema, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("flowschema"), name) + } + return obj.(*v1alpha1.FlowSchema), nil +} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go new file mode 100644 index 00000000..cb02129a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. +type PriorityLevelConfigurationLister interface { + // List lists all PriorityLevelConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.PriorityLevelConfiguration, err error) + // Get retrieves the PriorityLevelConfiguration from the index for a given name. 
+ Get(name string) (*v1alpha1.PriorityLevelConfiguration, error) + PriorityLevelConfigurationListerExpansion +} + +// priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. +type priorityLevelConfigurationLister struct { + indexer cache.Indexer +} + +// NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. +func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { + return &priorityLevelConfigurationLister{indexer: indexer} +} + +// List lists all PriorityLevelConfigurations in the indexer. +func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityLevelConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PriorityLevelConfiguration)) + }) + return ret, err +} + +// Get retrieves the PriorityLevelConfiguration from the index for a given name. +func (s *priorityLevelConfigurationLister) Get(name string) (*v1alpha1.PriorityLevelConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("prioritylevelconfiguration"), name) + } + return obj.(*v1alpha1.PriorityLevelConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go new file mode 100644 index 00000000..df6da06e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// IngressListerExpansion allows custom methods to be added to +// IngressLister. +type IngressListerExpansion interface{} + +// IngressNamespaceListerExpansion allows custom methods to be added to +// IngressNamespaceLister. +type IngressNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go new file mode 100644 index 00000000..6676742e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
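The flowcontrol listers added above are cluster-scoped, so Get resolves a bare object name straight through the indexer. Below is a minimal usage sketch that is not taken from the vendored sources: the object name is hypothetical, and the hand-built indexer stands in for the one a shared informer would normally own.

package main

import (
	"fmt"

	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	flowcontrollisters "k8s.io/client-go/listers/flowcontrol/v1alpha1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Cluster-scoped objects are keyed by bare name under MetaNamespaceKeyFunc.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&flowcontrolv1alpha1.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "exempt"}, // hypothetical name
	})

	lister := flowcontrollisters.NewFlowSchemaLister(indexer)

	fs, err := lister.Get("exempt") // indexer.GetByKey("exempt") under the hood
	if err != nil {
		panic(err)
	}
	fmt.Println(fs.Name)

	all, _ := lister.List(labels.Everything()) // selector applied over everything in the indexer
	fmt.Println(len(all))
}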
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// IngressLister helps list Ingresses. +type IngressLister interface { + // List lists all Ingresses in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + // Ingresses returns an object that can list and get Ingresses. + Ingresses(namespace string) IngressNamespaceLister + IngressListerExpansion +} + +// ingressLister implements the IngressLister interface. +type ingressLister struct { + indexer cache.Indexer +} + +// NewIngressLister returns a new IngressLister. +func NewIngressLister(indexer cache.Indexer) IngressLister { + return &ingressLister{indexer: indexer} +} + +// List lists all Ingresses in the indexer. +func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Ingress)) + }) + return ret, err +} + +// Ingresses returns an object that can list and get Ingresses. +func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { + return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IngressNamespaceLister helps list and get Ingresses. +type IngressNamespaceLister interface { + // List lists all Ingresses in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + // Get retrieves the Ingress from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Ingress, error) + IngressNamespaceListerExpansion +} + +// ingressNamespaceLister implements the IngressNamespaceLister +// interface. +type ingressNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Ingresses in the indexer for a given namespace. +func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Ingress)) + }) + return ret, err +} + +// Get retrieves the Ingress from the indexer for a given namespace and name. +func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) + } + return obj.(*v1beta1.Ingress), nil +} diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..a65c208f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
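By contrast, the Ingress lister just added is namespaced: the per-namespace view keys lookups as "namespace/name" and wraps misses in a standard NotFound API error. A small sketch under the same assumptions as the previous one (hypothetical names, hand-built indexer in place of an informer's):

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	networkinglisters "k8s.io/client-go/listers/networking/v1beta1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&networkingv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"},
	})

	lister := networkinglisters.NewIngressLister(indexer)

	ing, err := lister.Ingresses("default").Get("web") // GetByKey("default/web")
	if err != nil {
		panic(err)
	}
	fmt.Println(ing.Name)

	// A miss surfaces as a NotFound API error, so the usual helper recognizes it.
	_, err = lister.Ingresses("default").Get("missing")
	fmt.Println(apierrors.IsNotFound(err)) // true
}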
+ +package v1alpha1 + +// RuntimeClassListerExpansion allows custom methods to be added to +// RuntimeClassLister. +type RuntimeClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go new file mode 100644 index 00000000..af3f02b9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/node/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RuntimeClassLister helps list RuntimeClasses. +type RuntimeClassLister interface { + // List lists all RuntimeClasses in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) + // Get retrieves the RuntimeClass from the index for a given name. + Get(name string) (*v1alpha1.RuntimeClass, error) + RuntimeClassListerExpansion +} + +// runtimeClassLister implements the RuntimeClassLister interface. +type runtimeClassLister struct { + indexer cache.Indexer +} + +// NewRuntimeClassLister returns a new RuntimeClassLister. +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { + return &runtimeClassLister{indexer: indexer} +} + +// List lists all RuntimeClasses in the indexer. +func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.RuntimeClass)) + }) + return ret, err +} + +// Get retrieves the RuntimeClass from the index for a given name. +func (s *runtimeClassLister) Get(name string) (*v1alpha1.RuntimeClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("runtimeclass"), name) + } + return obj.(*v1alpha1.RuntimeClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go new file mode 100644 index 00000000..a6744055 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1beta1 + +// RuntimeClassListerExpansion allows custom methods to be added to +// RuntimeClassLister. +type RuntimeClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go new file mode 100644 index 00000000..be642b99 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/node/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RuntimeClassLister helps list RuntimeClasses. +type RuntimeClassLister interface { + // List lists all RuntimeClasses in the indexer. + List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) + // Get retrieves the RuntimeClass from the index for a given name. + Get(name string) (*v1beta1.RuntimeClass, error) + RuntimeClassListerExpansion +} + +// runtimeClassLister implements the RuntimeClassLister interface. +type runtimeClassLister struct { + indexer cache.Indexer +} + +// NewRuntimeClassLister returns a new RuntimeClassLister. +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { + return &runtimeClassLister{indexer: indexer} +} + +// List lists all RuntimeClasses in the indexer. +func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.RuntimeClass)) + }) + return ret, err +} + +// Get retrieves the RuntimeClass from the index for a given name. 
+func (s *runtimeClassLister) Get(name string) (*v1beta1.RuntimeClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("runtimeclass"), name) + } + return obj.(*v1beta1.RuntimeClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go index c0ab9d3e..d07d11a9 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go @@ -19,11 +19,11 @@ package v1beta1 import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/klog" ) // PodDisruptionBudgetListerExpansion allows custom methods to be added to @@ -54,7 +54,7 @@ func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]* pdb := list[i] selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector) if err != nil { - glog.Warningf("invalid selector: %v", err) + klog.Warningf("invalid selector: %v", err) // TODO(mml): add an event to the PDB continue } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go new file mode 100644 index 00000000..d0c45d01 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// PriorityClassListerExpansion allows custom methods to be added to +// PriorityClassLister. +type PriorityClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go new file mode 100644 index 00000000..452fee59 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/scheduling/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityClassLister helps list PriorityClasses. 
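The PodDisruptionBudget lister expansion above (and the exec auth and rest packages later in this patch) switch their logging from github.com/golang/glog to k8s.io/klog. For reference, a minimal sketch of how a consumer wires klog up; nothing here is taken from the vendored code, and the flags are the standard ones klog registers:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on flag.CommandLine
	flag.Parse()

	klog.Warningf("invalid selector: %v", "placeholder error value")
	klog.Flush() // klog may buffer; flush before exit
}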
+type PriorityClassLister interface { + // List lists all PriorityClasses in the indexer. + List(selector labels.Selector) (ret []*v1.PriorityClass, err error) + // Get retrieves the PriorityClass from the index for a given name. + Get(name string) (*v1.PriorityClass, error) + PriorityClassListerExpansion +} + +// priorityClassLister implements the PriorityClassLister interface. +type priorityClassLister struct { + indexer cache.Indexer +} + +// NewPriorityClassLister returns a new PriorityClassLister. +func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { + return &priorityClassLister{indexer: indexer} +} + +// List lists all PriorityClasses in the indexer. +func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1.PriorityClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PriorityClass)) + }) + return ret, err +} + +// Get retrieves the PriorityClass from the index for a given name. +func (s *priorityClassLister) Get(name string) (*v1.PriorityClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("priorityclass"), name) + } + return obj.(*v1.PriorityClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go new file mode 100644 index 00000000..577f7285 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CSINodeLister helps list CSINodes. +type CSINodeLister interface { + // List lists all CSINodes in the indexer. + List(selector labels.Selector) (ret []*v1.CSINode, err error) + // Get retrieves the CSINode from the index for a given name. + Get(name string) (*v1.CSINode, error) + CSINodeListerExpansion +} + +// cSINodeLister implements the CSINodeLister interface. +type cSINodeLister struct { + indexer cache.Indexer +} + +// NewCSINodeLister returns a new CSINodeLister. +func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { + return &cSINodeLister{indexer: indexer} +} + +// List lists all CSINodes in the indexer. +func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1.CSINode, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CSINode)) + }) + return ret, err +} + +// Get retrieves the CSINode from the index for a given name. 
+func (s *cSINodeLister) Get(name string) (*v1.CSINode, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("csinode"), name) + } + return obj.(*v1.CSINode), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go index d9324706..41efa324 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go @@ -18,6 +18,14 @@ limitations under the License. package v1 +// CSINodeListerExpansion allows custom methods to be added to +// CSINodeLister. +type CSINodeListerExpansion interface{} + // StorageClassListerExpansion allows custom methods to be added to // StorageClassLister. type StorageClassListerExpansion interface{} + +// VolumeAttachmentListerExpansion allows custom methods to be added to +// VolumeAttachmentLister. +type VolumeAttachmentListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go new file mode 100644 index 00000000..14888812 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentLister helps list VolumeAttachments. +type VolumeAttachmentLister interface { + // List lists all VolumeAttachments in the indexer. + List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) + // Get retrieves the VolumeAttachment from the index for a given name. + Get(name string) (*v1.VolumeAttachment, error) + VolumeAttachmentListerExpansion +} + +// volumeAttachmentLister implements the VolumeAttachmentLister interface. +type volumeAttachmentLister struct { + indexer cache.Indexer +} + +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister. +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { + return &volumeAttachmentLister{indexer: indexer} +} + +// List lists all VolumeAttachments in the indexer. +func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.VolumeAttachment)) + }) + return ret, err +} + +// Get retrieves the VolumeAttachment from the index for a given name. 
+func (s *volumeAttachmentLister) Get(name string) (*v1.VolumeAttachment, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("volumeattachment"), name) + } + return obj.(*v1.VolumeAttachment), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go new file mode 100644 index 00000000..8a401375 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CSIDriverLister helps list CSIDrivers. +type CSIDriverLister interface { + // List lists all CSIDrivers in the indexer. + List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) + // Get retrieves the CSIDriver from the index for a given name. + Get(name string) (*v1beta1.CSIDriver, error) + CSIDriverListerExpansion +} + +// cSIDriverLister implements the CSIDriverLister interface. +type cSIDriverLister struct { + indexer cache.Indexer +} + +// NewCSIDriverLister returns a new CSIDriverLister. +func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { + return &cSIDriverLister{indexer: indexer} +} + +// List lists all CSIDrivers in the indexer. +func (s *cSIDriverLister) List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CSIDriver)) + }) + return ret, err +} + +// Get retrieves the CSIDriver from the index for a given name. +func (s *cSIDriverLister) Get(name string) (*v1beta1.CSIDriver, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("csidriver"), name) + } + return obj.(*v1beta1.CSIDriver), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go new file mode 100644 index 00000000..bb7a2b2b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. 
DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CSINodeLister helps list CSINodes. +type CSINodeLister interface { + // List lists all CSINodes in the indexer. + List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) + // Get retrieves the CSINode from the index for a given name. + Get(name string) (*v1beta1.CSINode, error) + CSINodeListerExpansion +} + +// cSINodeLister implements the CSINodeLister interface. +type cSINodeLister struct { + indexer cache.Indexer +} + +// NewCSINodeLister returns a new CSINodeLister. +func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { + return &cSINodeLister{indexer: indexer} +} + +// List lists all CSINodes in the indexer. +func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CSINode)) + }) + return ret, err +} + +// Get retrieves the CSINode from the index for a given name. +func (s *cSINodeLister) Get(name string) (*v1beta1.CSINode, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("csinode"), name) + } + return obj.(*v1beta1.CSINode), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go index 21d95620..eeca4fdb 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go @@ -18,6 +18,14 @@ limitations under the License. package v1beta1 +// CSIDriverListerExpansion allows custom methods to be added to +// CSIDriverLister. +type CSIDriverListerExpansion interface{} + +// CSINodeListerExpansion allows custom methods to be added to +// CSINodeLister. +type CSINodeListerExpansion interface{} + // StorageClassListerExpansion allows custom methods to be added to // StorageClassLister. type StorageClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS new file mode 100644 index 00000000..e0ec62de --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-authenticators-approvers +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go index d06482d5..b9945975 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=client.authentication.k8s.io + package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go index 016adb28..19ab7761 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io + package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go index 921f3a2b..c714e245 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -22,7 +22,7 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredentials is used by exec-based plugins to communicate credentials to +// ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go index fbcd9b7f..22d1c588 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io + package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index 94ef4b73..0e533e46 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -51,11 +51,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) }); err != nil { diff --git a/vendor/k8s.io/client-go/pkg/version/def.bzl b/vendor/k8s.io/client-go/pkg/version/def.bzl index 9c018a4e..ecc9cd3b 100644 --- a/vendor/k8s.io/client-go/pkg/version/def.bzl +++ b/vendor/k8s.io/client-go/pkg/version/def.bzl @@ -16,7 +16,7 @@ def version_x_defs(): # This should match the list of packages in kube::version::ldflag stamp_pkgs = [ - "k8s.io/kubernetes/pkg/version", + "k8s.io/component-base/version", # In hack/lib/version.sh, this has a vendor/ prefix. That isn't needed here? "k8s.io/client-go/pkg/version", ] diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go index 30399fb0..05e997e1 100644 --- a/vendor/k8s.io/client-go/pkg/version/doc.go +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:openapi-gen=true + // Package version supplies version information collected at build time to // kubernetes components. 
-// +k8s:openapi-gen=true package version // import "k8s.io/client-go/pkg/version" diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index cae9d0d6..741729bb 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -31,9 +31,9 @@ import ( "sync" "time" - "github.com/golang/glog" + "github.com/davecgh/go-spew/spew" "golang.org/x/crypto/ssh/terminal" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -44,9 +44,11 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" + "k8s.io/klog" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" +const onRotateListWarningLength = 1000 var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) @@ -73,8 +75,10 @@ func newCache() *cache { return &cache{m: make(map[string]*Authenticator)} } +var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} + func cacheKey(c *api.ExecConfig) string { - return fmt.Sprintf("%#v", c) + return spewConfig.Sprint(c) } type cache struct { @@ -161,7 +165,7 @@ type Authenticator struct { cachedCreds *credentials exp time.Time - onRotate func() + onRotateList []func() } type credentials struct { @@ -172,13 +176,9 @@ type credentials struct { // UpdateTransportConfig updates the transport.Config to use credentials // returned by the plugin. func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { - wt := c.WrapTransport - c.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - if wt != nil { - rt = wt(rt) - } + c.Wrap(func(rt http.RoundTripper) http.RoundTripper { return &roundTripper{a, rt} - } + }) if c.TLS.GetCert != nil { return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") @@ -192,7 +192,15 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext } d := connrotation.NewDialer(dial) - a.onRotate = d.CloseAll + + a.mu.Lock() + defer a.mu.Unlock() + a.onRotateList = append(a.onRotateList, d.CloseAll) + onRotateListLength := len(a.onRotateList) + if onRotateListLength > onRotateListWarningLength { + klog.Warningf("constructing many client instances from the same exec auth config can cause performance problems during cert rotation and can exhaust available network connections; %d clients constructed calling %q", onRotateListLength, a.cmd) + } + c.Dial = d.DialContext return nil @@ -228,7 +236,7 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { Code: int32(res.StatusCode), } if err := r.a.maybeRefreshCreds(creds, resp); err != nil { - glog.Errorf("refreshing credentials: %v", err) + klog.Errorf("refreshing credentials: %v", err) } } return res, nil @@ -354,8 +362,10 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err a.cachedCreds = newCreds // Only close all connections when TLS cert rotates. Token rotation doesn't // need the extra noise. 
- if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { - a.onRotate() + if len(a.onRotateList) > 0 && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { + for _, onRotate := range a.onRotateList { + onRotate() + } } return nil } diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS index 8d97da00..c02ec6a2 100644 --- a/vendor/k8s.io/client-go/rest/OWNERS +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - smarterclayton @@ -18,7 +20,6 @@ reviewers: - resouer - cjcullen - rmmh -- lixiaobing10051267 - asalkeld - juanvallejo - lojies diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index 927403cb..53c6abd3 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -17,8 +17,6 @@ limitations under the License. package rest import ( - "fmt" - "mime" "net/http" "net/url" "os" @@ -51,6 +49,28 @@ type Interface interface { APIVersion() schema.GroupVersion } +// ClientContentConfig controls how RESTClient communicates with the server. +// +// TODO: ContentConfig will be updated to accept a Negotiator instead of a +// NegotiatedSerializer and NegotiatedSerializer will be removed. +type ClientContentConfig struct { + // AcceptContentTypes specifies the types the client will accept and is optional. + // If not set, ContentType will be used to define the Accept header + AcceptContentTypes string + // ContentType specifies the wire format used to communicate with the server. + // This value will be set as the Accept header on requests made to the server if + // AcceptContentTypes is not set, and as the default content type on any object + // sent to the server. If not set, "application/json" is used. + ContentType string + // GroupVersion is the API version to talk to. Must be provided when initializing + // a RESTClient directly. When initializing a Client, will be set with the default + // code version. This is used as the default group version for VersionedParams. + GroupVersion schema.GroupVersion + // Negotiator is used for obtaining encoders and decoders for multiple + // supported media types. + Negotiator runtime.ClientNegotiator +} + // RESTClient imposes common Kubernetes API conventions on a set of resource paths. // The baseURL is expected to point to an HTTP or HTTPS path that is the parent // of one or more resources. The server should return a decodable API resource @@ -64,34 +84,27 @@ type RESTClient struct { // versionedAPIPath is a path segment connecting the base URL to the resource root versionedAPIPath string - // contentConfig is the information used to communicate with the server. - contentConfig ContentConfig - - // serializers contain all serializers for underlying content type. - serializers Serializers + // content describes how a RESTClient encodes and decodes responses. + content ClientContentConfig // creates BackoffManager that is passed to requests. createBackoffMgr func() BackoffManager - // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. - Throttle flowcontrol.RateLimiter + // rateLimiter is shared among all requests created by this client unless specifically + // overridden. + rateLimiter flowcontrol.RateLimiter // Set specific behavior of the client. If not set http.DefaultClient will be used. 
Client *http.Client } -type Serializers struct { - Encoder runtime.Encoder - Decoder runtime.Decoder - StreamingSerializer runtime.Serializer - Framer runtime.Framer - RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) -} - // NewRESTClient creates a new RESTClient. This client performs generic REST functions -// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and -// decoding of responses from the server. -func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { +// such as Get, Put, Post, and Delete on specified paths. +func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientContentConfig, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { + if len(config.ContentType) == 0 { + config.ContentType = "application/json" + } + base := *baseURL if !strings.HasSuffix(base.Path, "/") { base.Path += "/" @@ -99,31 +112,14 @@ func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConf base.RawQuery = "" base.Fragment = "" - if config.GroupVersion == nil { - config.GroupVersion = &schema.GroupVersion{} - } - if len(config.ContentType) == 0 { - config.ContentType = "application/json" - } - serializers, err := createSerializers(config) - if err != nil { - return nil, err - } - - var throttle flowcontrol.RateLimiter - if maxQPS > 0 && rateLimiter == nil { - throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) - } else if rateLimiter != nil { - throttle = rateLimiter - } return &RESTClient{ base: &base, versionedAPIPath: versionedAPIPath, - contentConfig: config, - serializers: *serializers, + content: config, createBackoffMgr: readExpBackoffConfig, - Throttle: throttle, - Client: client, + rateLimiter: rateLimiter, + + Client: client, }, nil } @@ -132,7 +128,7 @@ func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { if c == nil { return nil } - return c.Throttle + return c.rateLimiter } // readExpBackoffConfig handles the internal logic of determining what the @@ -153,58 +149,6 @@ func readExpBackoffConfig() BackoffManager { time.Duration(backoffDurationInt)*time.Second)} } -// createSerializers creates all necessary serializers for given contentType. -// TODO: the negotiated serializer passed to this method should probably return -// serializers that control decoding and versioning without this package -// being aware of the types. Depends on whether RESTClient must deal with -// generic infrastructure. 
-func createSerializers(config ContentConfig) (*Serializers, error) { - mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes() - contentType := config.ContentType - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err) - } - info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType) - if !ok { - if len(contentType) != 0 || len(mediaTypes) == 0 { - return nil, fmt.Errorf("no serializers registered for %s", contentType) - } - info = mediaTypes[0] - } - - internalGV := schema.GroupVersions{ - { - Group: config.GroupVersion.Group, - Version: runtime.APIVersionInternal, - }, - // always include the legacy group as a decoding target to handle non-error `Status` return types - { - Group: "", - Version: runtime.APIVersionInternal, - }, - } - - s := &Serializers{ - Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion), - Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), - - RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { - info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType) - if !ok { - return nil, fmt.Errorf("serializer for %s not registered", contentType) - } - return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil - }, - } - if info.StreamSerializer != nil { - s.StreamingSerializer = info.StreamSerializer.Serializer - s.Framer = info.StreamSerializer.Framer - } - - return s, nil -} - // Verb begins a request with a verb (GET, POST, PUT, DELETE). // // Example usage of RESTClient's request building interface: @@ -219,12 +163,7 @@ func createSerializers(config ContentConfig) (*Serializers, error) { // list, ok := resp.(*api.PodList) // func (c *RESTClient) Verb(verb string) *Request { - backoff := c.createBackoffMgr() - - if c.Client == nil { - return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, 0) - } - return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, c.Client.Timeout) + return NewRequest(c).Verb(verb) } // Post begins a POST request. Short for c.Verb("POST"). @@ -254,5 +193,5 @@ func (c *RESTClient) Delete() *Request { // APIVersion returns the APIVersion this RESTClient is expected to use. func (c *RESTClient) APIVersion() schema.GroupVersion { - return *c.contentConfig.GroupVersion + return c.content.GroupVersion } diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 87e87905..f58f5183 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -29,14 +29,15 @@ import ( "strings" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/pkg/version" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/transport" certutil "k8s.io/client-go/util/cert" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) const ( @@ -70,6 +71,11 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. 
+ BearerTokenFile string + // Impersonate is the configuration that RESTClient will use for impersonation. Impersonate ImpersonationConfig @@ -88,15 +94,22 @@ type Config struct { // UserAgent is an optional field that specifies the caller of this request. UserAgent string + // DisableCompression bypasses automatic GZip compression requests to the + // server. + DisableCompression bool + // Transport may be used for custom HTTP behavior. This attribute may not // be specified with the TLS client certificate options. Use WrapTransport - // for most client level operations. + // to provide additional per-server middleware behavior. Transport http.RoundTripper // WrapTransport will be invoked for custom HTTP behavior after the underlying // transport is initialized (either the transport created from TLSClientConfig, // Transport, or http.DefaultTransport). The config may layer other RoundTrippers // on top of the returned RoundTripper. - WrapTransport func(rt http.RoundTripper) http.RoundTripper + // + // A future release will change this field to an array. Use config.Wrap() + // instead of setting this value directly. + WrapTransport transport.WrapperFunc // QPS indicates the maximum QPS to the master from this client. // If it's zero, the created RESTClient will use DefaultQPS: 5 @@ -120,6 +133,47 @@ type Config struct { // Version string } +var _ fmt.Stringer = new(Config) +var _ fmt.GoStringer = new(Config) + +type sanitizedConfig *Config + +type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister } + +func (sanitizedAuthConfigPersister) GoString() string { + return "rest.AuthProviderConfigPersister(--- REDACTED ---)" +} +func (sanitizedAuthConfigPersister) String() string { + return "rest.AuthProviderConfigPersister(--- REDACTED ---)" +} + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config +// to prevent accidental leaking via logs. +func (c *Config) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of Config to +// prevent accidental leaking via logs. +func (c *Config) String() string { + if c == nil { + return "" + } + cc := sanitizedConfig(CopyConfig(c)) + // Explicitly mark non-empty credential fields as redacted. + if cc.Password != "" { + cc.Password = "--- REDACTED ---" + } + if cc.BearerToken != "" { + cc.BearerToken = "--- REDACTED ---" + } + if cc.AuthConfigPersister != nil { + cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister} + } + + return fmt.Sprintf("%#v", cc) +} + // ImpersonationConfig has all the available impersonation options type ImpersonationConfig struct { // UserName is the username to impersonate on each request. @@ -157,6 +211,47 @@ type TLSClientConfig struct { // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). // CAData takes precedence over CAFile CAData []byte + + // NextProtos is a list of supported application level protocols, in order of preference. + // Used to populate tls.Config.NextProtos. + // To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference). + // To use only http/1.1, set to ["http/1.1"]. + NextProtos []string +} + +var _ fmt.Stringer = TLSClientConfig{} +var _ fmt.GoStringer = TLSClientConfig{} + +type sanitizedTLSClientConfig TLSClientConfig + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// TLSClientConfig to prevent accidental leaking via logs. 
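With the Stringer/GoStringer implementations added in this hunk, formatting a rest.Config (or its TLSClientConfig) through the fmt verbs no longer leaks credentials. A small sketch with placeholder values; only the redaction behaviour shown is taken from the change above:

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

func main() {
	cfg := &rest.Config{
		Host:        "https://example.invalid:6443", // placeholder host
		BearerToken: "not-a-real-token",             // placeholder credential
	}

	// %v and %#v route through String()/GoString(), which print
	// "--- REDACTED ---" in place of the bearer token and password.
	fmt.Printf("%v\n", cfg)
	fmt.Printf("%#v\n", cfg)
}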
+func (c TLSClientConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of +// TLSClientConfig to prevent accidental leaking via logs. +func (c TLSClientConfig) String() string { + cc := sanitizedTLSClientConfig{ + Insecure: c.Insecure, + ServerName: c.ServerName, + CertFile: c.CertFile, + KeyFile: c.KeyFile, + CAFile: c.CAFile, + CertData: c.CertData, + KeyData: c.KeyData, + CAData: c.CAData, + NextProtos: c.NextProtos, + } + // Explicitly mark non-empty credential fields as redacted. + if len(cc.CertData) != 0 { + cc.CertData = []byte("--- TRUNCATED ---") + } + if len(cc.KeyData) != 0 { + cc.KeyData = []byte("--- REDACTED ---") + } + return fmt.Sprintf("%#v", cc) } type ContentConfig struct { @@ -174,6 +269,9 @@ type ContentConfig struct { GroupVersion *schema.GroupVersion // NegotiatedSerializer is used for obtaining encoders and decoders for multiple // supported media types. + // + // TODO: NegotiatedSerializer will be phased out as internal clients are removed + // from Kubernetes. NegotiatedSerializer runtime.NegotiatedSerializer } @@ -188,14 +286,6 @@ func RESTClientFor(config *Config) (*RESTClient, error) { if config.NegotiatedSerializer == nil { return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } - qps := config.QPS - if config.QPS == 0.0 { - qps = DefaultQPS - } - burst := config.Burst - if config.Burst == 0 { - burst = DefaultBurst - } baseURL, versionedAPIPath, err := defaultServerUrlFor(config) if err != nil { @@ -215,7 +305,33 @@ func RESTClientFor(config *Config) (*RESTClient, error) { } } - return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient) + rateLimiter := config.RateLimiter + if rateLimiter == nil { + qps := config.QPS + if config.QPS == 0.0 { + qps = DefaultQPS + } + burst := config.Burst + if config.Burst == 0 { + burst = DefaultBurst + } + if qps > 0 { + rateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst) + } + } + + var gv schema.GroupVersion + if config.GroupVersion != nil { + gv = *config.GroupVersion + } + clientContent := ClientContentConfig{ + AcceptContentTypes: config.AcceptContentTypes, + ContentType: config.ContentType, + GroupVersion: gv, + Negotiator: runtime.NewClientNegotiator(config.NegotiatedSerializer, gv), + } + + return NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient) } // UnversionedRESTClientFor is the same as RESTClientFor, except that it allows @@ -243,13 +359,33 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { } } - versionConfig := config.ContentConfig - if versionConfig.GroupVersion == nil { - v := metav1.SchemeGroupVersion - versionConfig.GroupVersion = &v + rateLimiter := config.RateLimiter + if rateLimiter == nil { + qps := config.QPS + if config.QPS == 0.0 { + qps = DefaultQPS + } + burst := config.Burst + if config.Burst == 0 { + burst = DefaultBurst + } + if qps > 0 { + rateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst) + } + } + + gv := metav1.SchemeGroupVersion + if config.GroupVersion != nil { + gv = *config.GroupVersion + } + clientContent := ClientContentConfig{ + AcceptContentTypes: config.AcceptContentTypes, + ContentType: config.ContentType, + GroupVersion: gv, + Negotiator: runtime.NewClientNegotiator(config.NegotiatedSerializer, gv), } - return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) + return 
NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient) } // SetKubernetesDefaults sets default values on the provided client config for accessing the @@ -322,16 +458,15 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - ts := newCachedPathTokenSource(tokenFile) - - if _, err := ts.Token(); err != nil { + token, err := ioutil.ReadFile(tokenFile) + if err != nil { return nil, err } tlsClientConfig := TLSClientConfig{} if _, err := certutil.NewPool(rootCAFile); err != nil { - glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { tlsClientConfig.CAFile = rootCAFile } @@ -340,7 +475,8 @@ func InClusterConfig() (*Config, error) { // TODO: switch to using cluster DNS. Host: "https://" + net.JoinHostPort(host, port), TLSClientConfig: tlsClientConfig, - WrapTransport: TokenSourceWrapTransport(ts), + BearerToken: string(token), + BearerTokenFile: tokenFile, }, nil } @@ -403,7 +539,7 @@ func AddUserAgent(config *Config, userAgent string) *Config { return config } -// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed +// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) and custom transports (WrapTransport, Transport) removed func AnonymousClientConfig(config *Config) *Config { // copy only known safe fields return &Config{ @@ -415,27 +551,28 @@ func AnonymousClientConfig(config *Config) *Config { ServerName: config.ServerName, CAFile: config.TLSClientConfig.CAFile, CAData: config.TLSClientConfig.CAData, + NextProtos: config.TLSClientConfig.NextProtos, }, - RateLimiter: config.RateLimiter, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - Timeout: config.Timeout, - Dial: config.Dial, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + Dial: config.Dial, } } // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { return &Config{ - Host: config.Host, - APIPath: config.APIPath, - ContentConfig: config.ContentConfig, - Username: config.Username, - Password: config.Password, - BearerToken: config.BearerToken, + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, + BearerTokenFile: config.BearerTokenFile, Impersonate: ImpersonationConfig{ Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, @@ -453,14 +590,16 @@ func CopyConfig(config *Config) *Config { CertData: config.TLSClientConfig.CertData, KeyData: config.TLSClientConfig.KeyData, CAData: config.TLSClientConfig.CAData, + NextProtos: config.TLSClientConfig.NextProtos, }, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - RateLimiter: config.RateLimiter, - Timeout: config.Timeout, - Dial: config.Dial, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + RateLimiter: 
config.RateLimiter, + Timeout: config.Timeout, + Dial: config.Dial, } } diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index cf8fbabf..0bc2d03f 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -21,7 +21,7 @@ import ( "net/http" "sync" - "github.com/golang/glog" + "k8s.io/klog" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -55,9 +55,9 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { pluginsLock.Lock() defer pluginsLock.Unlock() if _, found := plugins[name]; found { - return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + return fmt.Errorf("auth Provider Plugin %q was registered twice", name) } - glog.V(4).Infof("Registered Auth Provider Plugin %q", name) + klog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin return nil } @@ -67,7 +67,7 @@ func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig defer pluginsLock.Unlock() p, ok := plugins[apc.Name] if !ok { - return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + return nil, fmt.Errorf("no Auth Provider found for name %q", apc.Name) } return p(clusterAddress, apc.Config, persister) } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 9bb31144..9e0c2611 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -32,7 +32,6 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,11 +43,13 @@ import ( restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) var ( // longThrottleLatency defines threshold for logging requests. All requests being - // throttle for more than longThrottleLatency will be logged. + // throttled (via the provided rateLimiter) for more than longThrottleLatency will + // be logged. longThrottleLatency = 50 * time.Millisecond ) @@ -74,19 +75,20 @@ func (r *RequestConstructionError) Error() string { return fmt.Sprintf("request construction error: '%v'", r.Err) } +var noBackoff = &NoBackoff{} + // Request allows for building up a request to a server in a chained fashion. // Any errors are stored until the end of your call, so you only have to // check once. type Request struct { - // required - client HTTPClient - verb string + c *RESTClient - baseURL *url.URL - content ContentConfig - serializers Serializers + rateLimiter flowcontrol.RateLimiter + backoff BackoffManager + timeout time.Duration // generic components accessible via method setters + verb string pathPrefix string subpath string params url.Values @@ -98,7 +100,6 @@ type Request struct { resource string resourceName string subresource string - timeout time.Duration // output err error @@ -106,42 +107,63 @@ type Request struct { // This is only used for per-request timeouts, deadlines, and cancellations. ctx context.Context - - backoffMgr BackoffManager - throttle flowcontrol.RateLimiter } // NewRequest creates a new request helper object for accessing runtime.Objects on a server. 
-func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request { +func NewRequest(c *RESTClient) *Request { + var backoff BackoffManager + if c.createBackoffMgr != nil { + backoff = c.createBackoffMgr() + } if backoff == nil { - glog.V(2).Infof("Not implementing request backoff strategy.") - backoff = &NoBackoff{} + backoff = noBackoff + } + + var pathPrefix string + if c.base != nil { + pathPrefix = path.Join("/", c.base.Path, c.versionedAPIPath) + } else { + pathPrefix = path.Join("/", c.versionedAPIPath) } - pathPrefix := "/" - if baseURL != nil { - pathPrefix = path.Join(pathPrefix, baseURL.Path) + var timeout time.Duration + if c.Client != nil { + timeout = c.Client.Timeout } + r := &Request{ - client: client, - verb: verb, - baseURL: baseURL, - pathPrefix: path.Join(pathPrefix, versionedAPIPath), - content: content, - serializers: serializers, - backoffMgr: backoff, - throttle: throttle, + c: c, + rateLimiter: c.rateLimiter, + backoff: backoff, timeout: timeout, + pathPrefix: pathPrefix, } + switch { - case len(content.AcceptContentTypes) > 0: - r.SetHeader("Accept", content.AcceptContentTypes) - case len(content.ContentType) > 0: - r.SetHeader("Accept", content.ContentType+", */*") + case len(c.content.AcceptContentTypes) > 0: + r.SetHeader("Accept", c.content.AcceptContentTypes) + case len(c.content.ContentType) > 0: + r.SetHeader("Accept", c.content.ContentType+", */*") } return r } +// NewRequestWithClient creates a Request with an embedded RESTClient for use in test scenarios. +func NewRequestWithClient(base *url.URL, versionedAPIPath string, content ClientContentConfig, client *http.Client) *Request { + return NewRequest(&RESTClient{ + base: base, + versionedAPIPath: versionedAPIPath, + content: content, + Client: client, + }) +} + +// Verb sets the verb this request will use. +func (r *Request) Verb(verb string) *Request { + r.verb = verb + return r +} + // Prefix adds segments to the relative beginning to the request path. These // items will be placed before the optional Namespace, Resource, or Name sections. 
// Setting AbsPath will clear any previously set Prefix segments @@ -184,17 +206,17 @@ func (r *Request) Resource(resource string) *Request { // or defaults to the stub implementation if nil is provided func (r *Request) BackOff(manager BackoffManager) *Request { if manager == nil { - r.backoffMgr = &NoBackoff{} + r.backoff = &NoBackoff{} return r } - r.backoffMgr = manager + r.backoff = manager return r } // Throttle receives a rate-limiter and sets or replaces an existing request limiter func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request { - r.throttle = limiter + r.rateLimiter = limiter return r } @@ -272,8 +294,8 @@ func (r *Request) AbsPath(segments ...string) *Request { if r.err != nil { return r } - r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...)) - if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") { + r.pathPrefix = path.Join(r.c.base.Path, path.Join(segments...)) + if len(segments) == 1 && (len(r.c.base.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") { // preserve any trailing slashes for legacy behavior r.pathPrefix += "/" } @@ -317,7 +339,7 @@ func (r *Request) Param(paramName, s string) *Request { // VersionedParams will not write query parameters that have omitempty set and are empty. If a // parameter has already been set it is appended to (Params and VersionedParams are additive). func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { - return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion) + return r.SpecificallyVersionedParams(obj, codec, r.c.content.GroupVersion) } func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request { @@ -397,14 +419,19 @@ func (r *Request) Body(obj interface{}) *Request { if reflect.ValueOf(t).IsNil() { return r } - data, err := runtime.Encode(r.serializers.Encoder, t) + encoder, err := r.c.content.Negotiator.Encoder(r.c.content.ContentType, nil) + if err != nil { + r.err = err + return r + } + data, err := runtime.Encode(encoder, t) if err != nil { r.err = err return r } glogBody("Request Body", data) r.body = bytes.NewReader(data) - r.SetHeader("Content-Type", r.content.ContentType) + r.SetHeader("Content-Type", r.c.content.ContentType) default: r.err = fmt.Errorf("unknown type used for body: %+v", obj) } @@ -433,8 +460,8 @@ func (r *Request) URL() *url.URL { } finalURL := &url.URL{} - if r.baseURL != nil { - *finalURL = *r.baseURL + if r.c.base != nil { + *finalURL = *r.c.base } finalURL.Path = p @@ -468,8 +495,8 @@ func (r Request) finalURLTemplate() url.URL { segments := strings.Split(r.URL().Path, "/") groupIndex := 0 index := 0 - if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) { - groupIndex += len(strings.Split(r.baseURL.Path, "/")) + if r.URL() != nil && r.c.base != nil && strings.Contains(r.URL().Path, r.c.base.Path) { + groupIndex += len(strings.Split(r.c.base.Path, "/")) } if groupIndex >= len(segments) { return *url @@ -521,40 +548,34 @@ func (r Request) finalURLTemplate() url.URL { return *url } -func (r *Request) tryThrottle() { +func (r *Request) tryThrottle() error { + if r.rateLimiter == nil { + return nil + } + now := time.Now() - if r.throttle != nil { - r.throttle.Accept() + var err error + if r.ctx != nil { + err = r.rateLimiter.Wait(r.ctx) + } else { + r.rateLimiter.Accept() } + if latency := time.Since(now); latency > 
longThrottleLatency { - glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } + + return err } // Watch attempts to begin watching the requested location. // Returns a watch.Interface, or an error. func (r *Request) Watch() (watch.Interface, error) { - return r.WatchWithSpecificDecoders( - func(body io.ReadCloser) streaming.Decoder { - framer := r.serializers.Framer.NewFrameReader(body) - return streaming.NewDecoder(framer, r.serializers.StreamingSerializer) - }, - r.serializers.Decoder, - ) -} - -// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder. -// Turns out that you want one "standard" decoder for the watch event and one "personal" decoder for the content -// Returns a watch.Interface, or an error. -func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) { // We specifically don't want to rate limit watches, so we - // don't use r.throttle here. + // don't use r.rateLimiter here. if r.err != nil { return nil, r.err } - if r.serializers.Framer == nil { - return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType) - } url := r.URL().String() req, err := http.NewRequest(r.verb, url, r.body) @@ -565,18 +586,18 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) req = req.WithContext(r.ctx) } req.Header = r.headers - client := r.client + client := r.c.Client if client == nil { client = http.DefaultClient } - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) resp, err := client.Do(req) updateURLMetrics(r, resp, err) - if r.baseURL != nil { + if r.c.base != nil { if err != nil { - r.backoffMgr.UpdateBackoff(r.baseURL, err, 0) + r.backoff.UpdateBackoff(r.c.base, err, 0) } else { - r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode) + r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode) } } if err != nil { @@ -592,18 +613,36 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) if result := r.transformResponse(resp, req); result.err != nil { return nil, result.err } - return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) + return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) + } + + contentType := resp.Header.Get("Content-Type") + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err) + } + objectDecoder, streamingSerializer, framer, err := r.c.content.Negotiator.StreamDecoder(mediaType, params) + if err != nil { + return nil, err } - wrapperDecoder := wrapperDecoderFn(resp.Body) - return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil + + frameReader := framer.NewFrameReader(resp.Body) + watchEventDecoder := streaming.NewDecoder(frameReader, streamingSerializer) + + return watch.NewStreamWatcher( + restclientwatch.NewDecoder(watchEventDecoder, objectDecoder), + // use 500 to indicate that the cause of the error is unknown - other error codes + // are more specific to HTTP interactions, and set a reason + errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"), + ), 
nil } // updateURLMetrics is a convenience function for pushing metrics. // It also handles corner cases for incomplete/invalid request data. func updateURLMetrics(req *Request, resp *http.Response, err error) { url := "none" - if req.baseURL != nil { - url = req.baseURL.Host + if req.c.base != nil { + url = req.c.base.Host } // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric @@ -625,29 +664,34 @@ func (r *Request) Stream() (io.ReadCloser, error) { return nil, r.err } - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return nil, err + } url := r.URL().String() req, err := http.NewRequest(r.verb, url, nil) if err != nil { return nil, err } + if r.body != nil { + req.Body = ioutil.NopCloser(r.body) + } if r.ctx != nil { req = req.WithContext(r.ctx) } req.Header = r.headers - client := r.client + client := r.c.Client if client == nil { client = http.DefaultClient } - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) resp, err := client.Do(req) updateURLMetrics(r, resp, err) - if r.baseURL != nil { + if r.c.base != nil { if err != nil { - r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + r.backoff.UpdateBackoff(r.URL(), err, 0) } else { - r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) } } if err != nil { @@ -671,6 +715,33 @@ func (r *Request) Stream() (io.ReadCloser, error) { } } +// requestPreflightCheck looks for common programmer errors on Request. +// +// We tackle here two programmer mistakes. The first one is to try to create +// something(POST) using an empty string as namespace with namespaceSet as +// true. If namespaceSet is true then namespace should also be defined. The +// second mistake is, when under the same circumstances, the programmer tries +// to GET, PUT or DELETE a named resource(resourceName != ""), again, if +// namespaceSet is true then namespace must not be empty. +func (r *Request) requestPreflightCheck() error { + if !r.namespaceSet { + return nil + } + if len(r.namespace) > 0 { + return nil + } + + switch r.verb { + case "POST": + return fmt.Errorf("an empty namespace may not be set during creation") + case "GET", "PUT", "DELETE": + if len(r.resourceName) > 0 { + return fmt.Errorf("an empty namespace may not be set when a resource name is provided") + } + } + return nil +} + // request connects to the server and invokes the provided function when a server response is // received. It handles retry behavior and up front validation of requests. It will invoke // fn at most once. 
It will return an error if a problem occurred prior to connecting to the @@ -683,19 +754,15 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { }() if r.err != nil { - glog.V(4).Infof("Error in request: %v", r.err) + klog.V(4).Infof("Error in request: %v", r.err) return r.err } - // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace) - if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 { - return fmt.Errorf("an empty namespace may not be set when a resource name is provided") - } - if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 { - return fmt.Errorf("an empty namespace may not be set during creation") + if err := r.requestPreflightCheck(); err != nil { + return err } - client := r.client + client := r.c.Client if client == nil { client = http.DefaultClient } @@ -722,19 +789,21 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { } req.Header = r.headers - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) if retries > 0 { // We are retrying the request that we already send to apiserver // at least once before. - // This request should also be throttled with the client-internal throttler. - r.tryThrottle() + // This request should also be throttled with the client-internal rate limiter. + if err := r.tryThrottle(); err != nil { + return err + } } resp, err := client.Do(req) updateURLMetrics(r, resp, err) if err != nil { - r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + r.backoff.UpdateBackoff(r.URL(), err, 0) } else { - r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) } if err != nil { // "Connection reset by peer" is usually a transient error. @@ -770,14 +839,14 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { _, err := seeker.Seek(0, 0) if err != nil { - glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) fn(req, resp) return true } } - glog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) - r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) + klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) + r.backoff.Sleep(time.Duration(seconds) * time.Second) return false } fn(req, resp) @@ -793,12 +862,12 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // processing. // // Error type: -// * If the request can't be constructed, or an error happened earlier while building its -// arguments: *RequestConstructionError // * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // * http.Client.Do errors are returned directly. func (r *Request) Do() Result { - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return Result{err: err} + } var result Result err := r.request(func(req *http.Request, resp *http.Response) { @@ -812,7 +881,9 @@ func (r *Request) Do() Result { // DoRaw executes the request but does not process the response body. 
func (r *Request) DoRaw() ([]byte, error) { - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return nil, err + } var result Result err := r.request(func(req *http.Request, resp *http.Response) { @@ -844,14 +915,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 2. Apiserver sends back the headers and then part of the body // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. - glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) - streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) + klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err) return Result{ err: streamErr, } default: - glog.Errorf("Unexpected error when reading response body: %#v", err) - unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err) + klog.Errorf("Unexpected error when reading response body: %v", err) + unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %v", err) return Result{ err: unexpectedErr, } @@ -861,14 +932,18 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu glogBody("Response Body", body) // verify the content type is accurate + var decoder runtime.Decoder contentType := resp.Header.Get("Content-Type") - decoder := r.serializers.Decoder - if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) { + if len(contentType) == 0 { + contentType = r.c.content.ContentType + } + if len(contentType) > 0 { + var err error mediaType, params, err := mime.ParseMediaType(contentType) if err != nil { return Result{err: errors.NewInternalError(err)} } - decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params) + decoder, err = r.c.content.Negotiator.Decoder(mediaType, params) if err != nil { // if we fail to negotiate a decoder, treat this as an unstructured error switch { @@ -914,11 +989,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { - case bool(glog.V(10)): + case bool(klog.V(10)): return body - case bool(glog.V(9)): + case bool(klog.V(9)): max = 10240 - case bool(glog.V(8)): + case bool(klog.V(8)): max = 1024 } @@ -933,13 +1008,13 @@ func truncateBody(body string) string { // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. 
func glogBody(prefix string, body []byte) { - if glog.V(8) { + if klog.V(8) { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - glog.Infof("%s: %s", prefix, truncateBody(string(body))) + klog.Infof("%s: %s", prefix, truncateBody(string(body))) } } } @@ -988,7 +1063,7 @@ func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, } var groupResource schema.GroupResource if len(r.resource) > 0 { - groupResource.Group = r.content.GroupVersion.Group + groupResource.Group = r.c.content.GroupVersion.Group groupResource.Resource = r.resource } return errors.NewGenericServerResponse( @@ -1100,7 +1175,8 @@ func (r Result) Into(obj runtime.Object) error { return fmt.Errorf("serializer for %s doesn't exist", r.contentType) } if len(r.body) == 0 { - return fmt.Errorf("0-length response") + return fmt.Errorf("0-length response with status code: %d and content type: %s", + r.statusCode, r.contentType) } out, _, err := r.decoder.Decode(r.body, nil, obj) @@ -1141,7 +1217,7 @@ func (r Result) Error() error { // to be backwards compatible with old servers that do not return a version, default to "v1" out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) if err != nil { - glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) return r.err } switch t := out.(type) { @@ -1195,7 +1271,6 @@ func IsValidPathSegmentPrefix(name string) []string { func ValidatePathSegmentName(name string, prefix bool) []string { if prefix { return IsValidPathSegmentPrefix(name) - } else { - return IsValidPathSegmentName(name) } + return IsValidPathSegmentName(name) } diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 25c1801b..0800e4ec 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -61,9 +61,10 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // TransportConfig converts a client config to an appropriate transport config. 
func (c *Config) TransportConfig() (*transport.Config, error) { conf := &transport.Config{ - UserAgent: c.UserAgent, - Transport: c.Transport, - WrapTransport: c.WrapTransport, + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: c.WrapTransport, + DisableCompression: c.DisableCompression, TLS: transport.TLSConfig{ Insecure: c.Insecure, ServerName: c.ServerName, @@ -73,10 +74,12 @@ func (c *Config) TransportConfig() (*transport.Config, error) { CertData: c.CertData, KeyFile: c.KeyFile, KeyData: c.KeyData, + NextProtos: c.NextProtos, }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + BearerTokenFile: c.BearerTokenFile, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, Groups: c.Impersonate.Groups, @@ -103,14 +106,15 @@ func (c *Config) TransportConfig() (*transport.Config, error) { if err != nil { return nil, err } - wt := conf.WrapTransport - if wt != nil { - conf.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - return provider.WrapTransport(wt(rt)) - } - } else { - conf.WrapTransport = provider.WrapTransport - } + conf.Wrap(provider.WrapTransport) } return conf, nil } + +// Wrap adds a transport middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper prior to the +// first API call being made. The provided function is invoked after any +// existing transport wrappers are invoked. +func (c *Config) Wrap(fn transport.WrapperFunc) { + c.WrapTransport = transport.Wrappers(c.WrapTransport, fn) +} diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go index eff848ab..d00e42f8 100644 --- a/vendor/k8s.io/client-go/rest/urlbackoff.go +++ b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -20,9 +20,9 @@ import ( "net/url" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) // Set of resp. Codes that we backoff for. @@ -64,7 +64,7 @@ func (n *NoBackoff) Sleep(d time.Duration) { // Disable makes the backoff trivial, i.e., sets it to zero. This might be used // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { - glog.V(4).Infof("Disabling backoff strategy") + klog.V(4).Infof("Disabling backoff strategy") b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) } @@ -76,7 +76,7 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { // in the future. host, err := url.Parse(rawurl.String()) if err != nil { - glog.V(4).Infof("Error extracting url: %v", rawurl) + klog.V(4).Infof("Error extracting url: %v", rawurl) panic("bad url!") } return host.Host @@ -89,7 +89,7 @@ func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode i b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) return } else if responseCode >= 300 || err != nil { - glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) } //If we got this far, there is no backoff required for this URL anymore. 
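The transport.go hunk above replaces direct manipulation of Config.WrapTransport with the new Config.Wrap helper, which chains wrappers through transport.Wrappers instead of overwriting whatever wrapper is already set. A minimal usage sketch of that helper follows; the headerTripper type, the X-Example header, and addHeaderWrapper are purely illustrative assumptions, only rest.Config, Config.Wrap, and the http.RoundTripper wrapper shape come from this patch.

package main

import (
	"net/http"

	"k8s.io/client-go/rest"
)

// headerTripper is a hypothetical RoundTripper that stamps one extra header
// on every request before delegating to the wrapped transport.
type headerTripper struct {
	rt http.RoundTripper
}

func (h *headerTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	clone := req.Clone(req.Context())
	clone.Header.Set("X-Example", "demo") // illustrative header only
	return h.rt.RoundTrip(clone)
}

func addHeaderWrapper(cfg *rest.Config) {
	// Config.Wrap composes with any WrapTransport already present rather than
	// replacing it, which is why the field comment above steers callers here.
	cfg.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return &headerTripper{rt: rt}
	})
}

func main() {
	cfg := &rest.Config{Host: "https://localhost:6443"} // placeholder host for the sketch
	addHeaderWrapper(cfg)
	_ = cfg
}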
diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go index 73bb63ad..e95c020b 100644 --- a/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -54,7 +54,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { return "", nil, fmt.Errorf("unable to decode to metav1.Event") } switch got.Type { - case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): + case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark): default: return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) } diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go index c1ab45f3..da4a1624 100644 --- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -38,6 +38,11 @@ func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { *out = make([]byte, len(*in)) copy(*out, *in) } + if in.NextProtos != nil { + in, out := &in.NextProtos, &out.NextProtos + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index 727377f9..bd61bc76 100644 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - thockin - lavalamp @@ -22,7 +24,6 @@ reviewers: - erictune - davidopp - pmorie -- kargakis - janetkuo - justinsb - eparis @@ -32,8 +33,6 @@ reviewers: - madhusudancs - hongchaodeng - krousey -- markturansky -- fgrzadkowski - xiang90 - mml - ingvagabund @@ -41,10 +40,6 @@ reviewers: - jessfraz - david-mcmahon - mfojtik -- '249043822' -- lixiaobing10051267 -- ddysher - mqliang -- feihujiang - sdminonne - ncdc diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 028c75e8..27a1c52c 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -28,9 +28,9 @@ import ( // Config contains all the settings for a Controller. type Config struct { - // The queue for your objects; either a FIFO or - // a DeltaFIFO. Your Process() function should accept - // the output of this Queue's Pop() method. + // The queue for your objects - has to be a DeltaFIFO due to + // assumptions in the implementation. Your Process() function + // should accept the output of this Queue's Pop() method. Queue // Something that can list and watch your objects. @@ -79,6 +79,7 @@ type controller struct { clock clock.Clock } +// Controller is a generic controller framework. type Controller interface { Run(stopCh <-chan struct{}) HasSynced() bool @@ -130,6 +131,8 @@ func (c *controller) HasSynced() bool { } func (c *controller) LastSyncResourceVersion() string { + c.reflectorMutex.RLock() + defer c.reflectorMutex.RUnlock() if c.reflector == nil { return "" } @@ -149,7 +152,7 @@ func (c *controller) processLoop() { for { obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) if err != nil { - if err == FIFOClosedError { + if err == ErrFIFOClosed { return } if c.config.RetryOnError { @@ -285,45 +288,7 @@ func NewInformer( // This will hold the client state, as we know it. 
clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - // This will hold incoming changes. Note how we pass clientState in as a - // KeyLister, that way resync operations will result in the correct set - // of update/delete deltas. - fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: lw, - ObjectType: objType, - FullResyncPeriod: resyncPeriod, - RetryOnError: false, - - Process: func(obj interface{}) error { - // from oldest to newest - for _, d := range obj.(Deltas) { - switch d.Type { - case Sync, Added, Updated: - if old, exists, err := clientState.Get(d.Object); err == nil && exists { - if err := clientState.Update(d.Object); err != nil { - return err - } - h.OnUpdate(old, d.Object) - } else { - if err := clientState.Add(d.Object); err != nil { - return err - } - h.OnAdd(d.Object) - } - case Deleted: - if err := clientState.Delete(d.Object); err != nil { - return err - } - h.OnDelete(d.Object) - } - } - return nil - }, - } - return clientState, New(cfg) + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) } // NewIndexerInformer returns a Indexer and a controller for populating the index @@ -352,6 +317,30 @@ func NewIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) +} + +// newInformer returns a controller for populating the store while also +// providing event notifications. +// +// Parameters +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. +// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// * clientState is the store you want to populate +// +func newInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + clientState Store, +) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. @@ -390,5 +379,5 @@ func NewIndexerInformer( return nil }, } - return clientState, New(cfg) + return New(cfg) } diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 45c3b500..55ecdcdf 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) // NewDeltaFIFO returns a Store which can be used process changes to items. 
@@ -160,7 +160,7 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { return f.keyFunc(obj) } -// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() @@ -295,13 +295,6 @@ func isDeletionDup(a, b *Delta) *Delta { return b } -// willObjectBeDeletedLocked returns true only if the last delta for the -// given object is Delete. Caller must lock first. -func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool { - deltas := f.items[id] - return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted -} - // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { @@ -310,27 +303,18 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err return KeyError{obj, err} } - // If object is supposed to be deleted (last event is Deleted), - // then we should ignore Sync events, because it would result in - // recreation of this object. - if actionType == Sync && f.willObjectBeDeletedLocked(id) { - return nil - } - newDeltas := append(f.items[id], Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) - _, exists := f.items[id] if len(newDeltas) > 0 { - if !exists { + if _, exists := f.items[id]; !exists { f.queue = append(f.queue, id) } f.items[id] = newDeltas f.cond.Broadcast() - } else if exists { - // We need to remove this from our map (extra items - // in the queue are ignored if they are not in the - // map). + } else { + // We need to remove this from our map (extra items in the queue are + // ignored if they are not in the map). delete(f.items, id) } return nil @@ -348,9 +332,6 @@ func (f *DeltaFIFO) List() []interface{} { func (f *DeltaFIFO) listLocked() []interface{} { list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { - // Copy item's slice so operations on this slice - // won't interfere with the object we return. - item = copyDeltas(item) list = append(list, item.Newest().Object) } return list @@ -394,14 +375,11 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err return d, exists, nil } -// Checks if the queue is closed +// IsClosed checks if the queue is closed func (f *DeltaFIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() - if f.closed { - return true - } - return false + return f.closed } // Pop blocks until an item is added to the queue, and then returns it. If @@ -425,17 +403,17 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). if f.IsClosed() { - return nil, FIFOClosedError + return nil, ErrFIFOClosed } f.cond.Wait() } id := f.queue[0] f.queue = f.queue[1:] - item, ok := f.items[id] if f.initialPopulationCount > 0 { f.initialPopulationCount-- } + item, ok := f.items[id] if !ok { // Item may have been deleted subsequently. continue @@ -474,6 +452,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if f.knownObjects == nil { // Do deletion detection against our own list. 
+ queuedDeletions := 0 for k, oldItem := range f.items { if keys.Has(k) { continue @@ -482,6 +461,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if n := oldItem.Newest(); n != nil { deletedObj = n.Object } + queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { return err } @@ -489,7 +469,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if !f.populated { f.populated = true - f.initialPopulationCount = len(list) + // While there shouldn't be any queued deletions in the initial + // population of the queue, it's better to be on the safe side. + f.initialPopulationCount = len(list) + queuedDeletions } return nil @@ -506,10 +488,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { deletedObj, exists, err := f.knownObjects.GetByKey(k) if err != nil { deletedObj = nil - glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) } else if !exists { deletedObj = nil - glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { @@ -543,20 +525,13 @@ func (f *DeltaFIFO) Resync() error { return nil } -func (f *DeltaFIFO) syncKey(key string) error { - f.lock.Lock() - defer f.lock.Unlock() - - return f.syncKeyLocked(key) -} - func (f *DeltaFIFO) syncKeyLocked(key string) error { obj, exists, err := f.knownObjects.GetByKey(key) if err != nil { - glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) + klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) return nil } else if !exists { - glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) + klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) return nil } @@ -597,6 +572,7 @@ type KeyGetter interface { // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string +// Change type definition const ( Added DeltaType = "Added" Updated DeltaType = "Updated" diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index fa88fc40..14ad492e 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -20,8 +20,8 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/klog" ) // ExpirationCache implements the store interface @@ -48,14 +48,14 @@ type ExpirationCache struct { // ExpirationPolicy dictates when an object expires. Currently only abstracted out // so unittests don't rely on the system clock. type ExpirationPolicy interface { - IsExpired(obj *timestampedEntry) bool + IsExpired(obj *TimestampedEntry) bool } // TTLPolicy implements a ttl based ExpirationPolicy. 
type TTLPolicy struct { // >0: Expire entries with an age > ttl // <=0: Don't expire any entry - Ttl time.Duration + TTL time.Duration // Clock used to calculate ttl expiration Clock clock.Clock @@ -63,26 +63,30 @@ type TTLPolicy struct { // IsExpired returns true if the given object is older than the ttl, or it can't // determine its age. -func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool { - return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl +func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { + return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL } -// timestampedEntry is the only type allowed in a ExpirationCache. -type timestampedEntry struct { - obj interface{} - timestamp time.Time +// TimestampedEntry is the only type allowed in a ExpirationCache. +// Keep in mind that it is not safe to share timestamps between computers. +// Behavior may be inconsistent if you get a timestamp from the API Server and +// use it on the client machine as part of your ExpirationCache. +type TimestampedEntry struct { + Obj interface{} + Timestamp time.Time + key string } -// getTimestampedEntry returns the timestampedEntry stored under the given key. -func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) { +// getTimestampedEntry returns the TimestampedEntry stored under the given key. +func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) { item, _ := c.cacheStorage.Get(key) - if tsEntry, ok := item.(*timestampedEntry); ok { + if tsEntry, ok := item.(*TimestampedEntry); ok { return tsEntry, true } return nil, false } -// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't +// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't // already expired. It holds a write lock across deletion. func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { // Prevent all inserts from the time we deem an item as "expired" to when we @@ -95,11 +99,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) + klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj) c.cacheStorage.Delete(key) return nil, false } - return timestampedItem.obj, true + return timestampedItem.Obj, true } // GetByKey returns the item stored under the key, or sets exists=false. @@ -126,10 +130,8 @@ func (c *ExpirationCache) List() []interface{} { list := make([]interface{}, 0, len(items)) for _, item := range items { - obj := item.(*timestampedEntry).obj - if key, err := c.keyFunc(obj); err != nil { - list = append(list, obj) - } else if obj, exists := c.getOrExpire(key); exists { + key := item.(*TimestampedEntry).key + if obj, exists := c.getOrExpire(key); exists { list = append(list, obj) } } @@ -144,14 +146,14 @@ func (c *ExpirationCache) ListKeys() []string { // Add timestamps an item and inserts it into the cache, overwriting entries // that might exist under the same key. 
func (c *ExpirationCache) Add(obj interface{}) error { - c.expirationLock.Lock() - defer c.expirationLock.Unlock() - key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } - c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()}) + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + + c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) return nil } @@ -163,12 +165,12 @@ func (c *ExpirationCache) Update(obj interface{}) error { // Delete removes an item from the cache. func (c *ExpirationCache) Delete(obj interface{}) error { - c.expirationLock.Lock() - defer c.expirationLock.Unlock() key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() c.cacheStorage.Delete(key) return nil } @@ -177,17 +179,17 @@ func (c *ExpirationCache) Delete(obj interface{}) error { // before attempting the replace operation. The replace operation will // delete the contents of the ExpirationCache `c`. func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { - c.expirationLock.Lock() - defer c.expirationLock.Unlock() - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) ts := c.clock.Now() for _, item := range list { key, err := c.keyFunc(item) if err != nil { return KeyError{item, err} } - items[key] = &timestampedEntry{item, ts} + items[key] = &TimestampedEntry{item, ts, key} } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() c.cacheStorage.Replace(items, resourceVersion) return nil } @@ -199,10 +201,15 @@ func (c *ExpirationCache) Resync() error { // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { + return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}}) +} + +// NewExpirationStore creates and returns a ExpirationCache for a given policy +func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store { return &ExpirationCache{ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), keyFunc: keyFunc, clock: clock.RealClock{}, - expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}}, + expirationPolicy: expirationPolicy, } } diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index a096765f..33afd32c 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -33,16 +33,19 @@ func (c *fakeThreadSafeMap) Delete(key string) { } } +// FakeExpirationPolicy keeps the list for keys which never expires. type FakeExpirationPolicy struct { NeverExpire sets.String RetrieveKeyFunc KeyFunc } -func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool { +// IsExpired used to check if object is expired. +func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool { key, _ := p.RetrieveKeyFunc(obj) return !p.NeverExpire.Has(key) } +// NewFakeExpirationStore creates a new instance for the ExpirationCache. 
func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store { cacheStorage := NewThreadSafeStore(Indexers{}, Indices{}) return &ExpirationCache{ diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go index 8d71c247..462d2266 100644 --- a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go +++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go @@ -16,7 +16,7 @@ limitations under the License. package cache -// FakeStore lets you define custom functions for store operations +// FakeCustomStore lets you define custom functions for store operations. type FakeCustomStore struct { AddFunc func(obj interface{}) error UpdateFunc func(obj interface{}) error @@ -25,7 +25,7 @@ type FakeCustomStore struct { ListKeysFunc func() []string GetFunc func(obj interface{}) (item interface{}, exists bool, err error) GetByKeyFunc func(key string) (item interface{}, exists bool, err error) - ReplaceFunc func(list []interface{}, resourceVerion string) error + ReplaceFunc func(list []interface{}, resourceVersion string) error ResyncFunc func() error } @@ -40,7 +40,7 @@ func (f *FakeCustomStore) Add(obj interface{}) error { // Update calls the custom Update function if defined func (f *FakeCustomStore) Update(obj interface{}) error { if f.UpdateFunc != nil { - return f.Update(obj) + return f.UpdateFunc(obj) } return nil } diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index e05c01ee..7a3bc3d3 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -34,7 +34,8 @@ type ErrRequeue struct { Err error } -var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue") +// ErrFIFOClosed used when FIFO is closed +var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue") func (e ErrRequeue) Error() string { if e.Err == nil { @@ -66,7 +67,7 @@ type Queue interface { Close() } -// Helper function for popping from Queue. +// Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. func Pop(queue Queue) interface{} { @@ -126,7 +127,7 @@ func (f *FIFO) Close() { f.cond.Broadcast() } -// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *FIFO) HasSynced() bool { f.lock.Lock() @@ -242,7 +243,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { return item, exists, nil } -// Checks if the queue is closed +// IsClosed checks if the queue is closed func (f *FIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() @@ -267,7 +268,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). if f.IsClosed() { - return nil, FIFOClosedError + return nil, ErrFIFOClosed } f.cond.Wait() @@ -297,7 +298,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { // after calling this function. f's queue is reset, too; upon return, it // will contain the items in the map, in no particular order. 
func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) for _, item := range list { key, err := f.keyFunc(item) if err != nil { diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go index 78e49245..e503a45a 100644 --- a/vendor/k8s.io/client-go/tools/cache/heap.go +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -28,7 +28,9 @@ const ( closedMsg = "heap is closed" ) +// LessFunc is used to compare two objects in the heap. type LessFunc func(interface{}, interface{}) bool + type heapItem struct { obj interface{} // The object which is stored in the heap. index int // The index of the object's key in the Heap.queue. @@ -158,7 +160,7 @@ func (h *Heap) Add(obj interface{}) error { return nil } -// Adds all the items in the list to the queue and then signals the condition +// BulkAdd adds all the items in the list to the queue and then signals the condition // variable. It is useful when the caller would like to add all of the items // to the queue before consumer starts processing them. func (h *Heap) BulkAdd(list []interface{}) error { @@ -204,7 +206,7 @@ func (h *Heap) AddIfNotPresent(obj interface{}) error { return nil } -// addIfNotPresentLocked assumes the lock is already held and adds the the provided +// addIfNotPresentLocked assumes the lock is already held and adds the provided // item to the queue if it does not already exist. func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { if _, exists := h.data.items[key]; exists { @@ -249,11 +251,11 @@ func (h *Heap) Pop() (interface{}, error) { h.cond.Wait() } obj := heap.Pop(h.data) - if obj != nil { - return obj, nil - } else { + if obj == nil { return nil, fmt.Errorf("object was removed from heap data") } + + return obj, nil } // List returns a list of all the items. diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index 15acb168..bbfb3b55 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -23,17 +23,27 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// Indexer is a storage interface that lets you list objects using multiple indexing functions +// Indexer is a storage interface that lets you list objects using multiple indexing functions. +// There are three kinds of strings here. +// One is a storage key, as defined in the Store interface. +// Another kind is a name of an index. +// The third kind of string is an "indexed value", which is produced by an +// IndexFunc and can be a field value or any other string computed from the object. type Indexer interface { Store - // Retrieve list of objects that match on the named indexing function + // Index returns the stored objects whose set of indexed values + // intersects the set of indexed values of the given object, for + // the named index Index(indexName string, obj interface{}) ([]interface{}, error) - // IndexKeys returns the set of keys that match on the named indexing function. 
- IndexKeys(indexName, indexKey string) ([]string, error) - // ListIndexFuncValues returns the list of generated values of an Index func + // IndexKeys returns the storage keys of the stored objects whose + // set of indexed values for the named index includes the given + // indexed value + IndexKeys(indexName, indexedValue string) ([]string, error) + // ListIndexFuncValues returns all the indexed values of the given index ListIndexFuncValues(indexName string) []string - // ByIndex lists object that match on the named indexing function with the exact key - ByIndex(indexName, indexKey string) ([]interface{}, error) + // ByIndex returns the stored objects whose set of indexed values + // for the named index includes the given indexed value + ByIndex(indexName, indexedValue string) ([]interface{}, error) // GetIndexer return the indexers GetIndexers() Indexers @@ -42,11 +52,11 @@ type Indexer interface { AddIndexers(newIndexers Indexers) error } -// IndexFunc knows how to provide an indexed value for an object. +// IndexFunc knows how to compute the set of indexed values for an object. type IndexFunc func(obj interface{}) ([]string, error) // IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns -// unique values for every object. This is conversion can create errors when more than one key is found. You +// unique values for every object. This conversion can create errors when more than one key is found. You // should prefer to make proper key and index functions. func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { return func(obj interface{}) (string, error) { @@ -65,6 +75,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { } const ( + // NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field. NamespaceIndex string = "namespace" ) diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 27d51a6b..d649cd73 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -17,7 +17,7 @@ limitations under the License. package cache import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -30,8 +30,16 @@ import ( // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) +// ListAll calls appendFn with each value retrieved from store which matches the selector. func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { + selectAll := selector.Empty() for _, m := range store.List() { + if selectAll { + // Avoid computing labels of the objects to speed up common flows + // of listing all objects. + appendFn(m) + continue + } metadata, err := meta.Accessor(m) if err != nil { return err @@ -43,9 +51,17 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { return nil } +// ListAllByNamespace used to list items belongs to namespace from Indexer. func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { + selectAll := selector.Empty() if namespace == metav1.NamespaceAll { for _, m := range indexer.List() { + if selectAll { + // Avoid computing labels of the objects to speed up common flows + // of listing all objects. 
+ appendFn(m) + continue + } metadata, err := meta.Accessor(m) if err != nil { return err @@ -60,7 +76,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) if err != nil { // Ignore error; do slow search without index. - glog.Warningf("can not retrieve list of objects using index : %v", err) + klog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range indexer.List() { metadata, err := meta.Accessor(m) if err != nil { @@ -74,6 +90,12 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec return nil } for _, m := range items { + if selectAll { + // Avoid computing labels of the objects to speed up common flows + // of listing all objects. + appendFn(m) + continue + } metadata, err := meta.Accessor(m) if err != nil { return err @@ -104,6 +126,7 @@ type GenericNamespaceLister interface { Get(name string) (runtime.Object, error) } +// NewGenericLister creates a new instance for the genericLister. func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { return &genericLister{indexer: indexer, resource: resource} } diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go index f8679165..8227b73b 100644 --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -27,15 +27,25 @@ import ( "k8s.io/client-go/tools/pager" ) -// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. -type ListerWatcher interface { +// Lister is any object that knows how to perform an initial list. +type Lister interface { // List should return a list type object; the Items field will be extracted, and the // ResourceVersion field will be used to start the watch in the right place. List(options metav1.ListOptions) (runtime.Object, error) +} + +// Watcher is any object that knows how to start a watch on a resource. +type Watcher interface { // Watch should begin a watch at the specified version. Watch(options metav1.ListOptions) (watch.Interface, error) } +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. +type ListerWatcher interface { + Lister + Watcher +} + // ListFunc knows how to list resources type ListFunc func(options metav1.ListOptions) (runtime.Object, error) diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go index cbb6434e..5d3245a6 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -42,6 +42,7 @@ type MutationCache interface { Mutation(interface{}) } +// ResourceVersionComparator is able to compare object versions. 
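Relating to the Lister/Watcher split a few hunks above: any value satisfying both interfaces is still a ListerWatcher, and the cache.ListWatch helper remains the usual way to build one from a typed clientset. The kubernetes.Interface parameter and namespace are assumptions; with the client-go vintage vendored here, List and Watch take only a metav1.ListOptions.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// podListerWatcher builds a ListerWatcher for pods in one namespace.
func podListerWatcher(client kubernetes.Interface, namespace string) cache.ListerWatcher {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}
}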
type ResourceVersionComparator interface { CompareResourceVersion(lhs, rhs runtime.Object) int } @@ -156,7 +157,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er } elements, err := fn(updated) if err != nil { - glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) + klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) continue } for _, inIndex := range elements { diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index e2aa4484..fa6acab3 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" @@ -36,17 +36,19 @@ func init() { mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) } -type CacheMutationDetector interface { +// MutationDetector is able to monitor if the object be modified outside. +type MutationDetector interface { AddObject(obj interface{}) Run(stopCh <-chan struct{}) } -func NewCacheMutationDetector(name string) CacheMutationDetector { +// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector. +func NewCacheMutationDetector(name string) MutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } - glog.Warningln("Mutation detector is enabled, this will result in memory leakage.") - return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} + klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") + return &defaultCacheMutationDetector{name: name, period: 1 * time.Second, retainDuration: 2 * time.Minute} } type dummyMutationDetector struct{} @@ -66,6 +68,10 @@ type defaultCacheMutationDetector struct { lock sync.Mutex cachedObjs []cacheObj + retainDuration time.Duration + lastRotated time.Time + retainedCachedObjs []cacheObj + // failureFunc is injectable for unit testing. If you don't have it, the process will panic. // This panic is intentional, since turning on this detection indicates you want a strong // failure signal. This failure is effectively a p0 bug and you can't trust process results @@ -82,6 +88,14 @@ type cacheObj struct { func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { // we DON'T want protection from panics. 
If we're running this code, we want to die for { + if d.lastRotated.IsZero() { + d.lastRotated = time.Now() + } else if time.Now().Sub(d.lastRotated) > d.retainDuration { + d.retainedCachedObjs = d.cachedObjs + d.cachedObjs = nil + d.lastRotated = time.Now() + } + d.CompareObjects() select { @@ -114,7 +128,13 @@ func (d *defaultCacheMutationDetector) CompareObjects() { altered := false for i, obj := range d.cachedObjs { if !reflect.DeepEqual(obj.cached, obj.copied) { - fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied)) + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) + altered = true + } + } + for i, obj := range d.retainedCachedObjs { + if !reflect.DeepEqual(obj.cached, obj.copied) { + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) altered = true } } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 9ee7efcb..62749ed7 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -17,41 +17,48 @@ limitations under the License. package cache import ( + "context" "errors" "fmt" "io" "math/rand" - "net" - "net/url" "reflect" - "strconv" - "strings" "sync" - "sync/atomic" - "syscall" "time" - "github.com/golang/glog" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/naming" + utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/pager" + "k8s.io/klog" + "k8s.io/utils/trace" ) +const defaultExpectedTypeName = "" + // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. name string - // metrics tracks basic metric information about the reflector - metrics *reflectorMetrics + // The name of the type we expect to place in the store. The name + // will be the stringification of expectedGVK if provided, and the + // stringification of expectedType otherwise. It is for display + // only, and should not be used for parsing or comparison. + expectedTypeName string // The type of object we expect to place in the store. expectedType reflect.Type + // The GVK of the object we expect to place in the store if unstructured. + expectedGVK *schema.GroupVersionKind // The destination to sync up with the watch source store Store // listerWatcher is used to perform lists and watches. @@ -69,6 +76,9 @@ type Reflector struct { lastSyncResourceVersion string // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex + // WatchListPageSize is the requested chunk size of initial and resync watch lists. + // Defaults to pager.PageSize. 
+ WatchListPageSize int64 } var ( @@ -80,7 +90,7 @@ var ( // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { - indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc}) + indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}) reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) return indexer, reflector } @@ -95,30 +105,39 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) } -// reflectorDisambiguator is used to disambiguate started reflectors. -// initialized to an unstable value to ensure meaning isn't attributed to the suffix. -var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345) - // NewNamedReflector same as NewReflector, but with a specified name for logging func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1) r := &Reflector{ - name: name, - // we need this to be unique per process (some names are still the same) but obvious who it belongs to - metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), + name: name, listerWatcher: lw, store: store, - expectedType: reflect.TypeOf(expectedType), period: time.Second, resyncPeriod: resyncPeriod, clock: &clock.RealClock{}, } + r.setExpectedType(expectedType) return r } -func makeValidPrometheusMetricLabel(in string) string { - // this isn't perfect, but it removes our common characters - return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) +func (r *Reflector) setExpectedType(expectedType interface{}) { + r.expectedType = reflect.TypeOf(expectedType) + if r.expectedType == nil { + r.expectedTypeName = defaultExpectedTypeName + return + } + + r.expectedTypeName = r.expectedType.String() + + if obj, ok := expectedType.(*unstructured.Unstructured); ok { + // Use gvk to check that watch event objects are of the desired type. + gvk := obj.GroupVersionKind() + if gvk.Empty() { + klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) + return + } + r.expectedGVK = &gvk + r.expectedTypeName = gvk.String() + } } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -128,7 +147,7 @@ var internalPackages = []string{"client-go/tools/cache/"} // Run starts a watch and handles watch events. Will restart the watch if it is closed. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) wait.Until(func() { if err := r.ListAndWatch(stopCh); err != nil { utilruntime.HandleError(err) @@ -140,9 +159,6 @@ var ( // nothing will ever be sent down this channel neverExitWatch <-chan time.Time = make(chan time.Time) - // Used to indicate that watching stopped so that a resync could happen. 
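A sketch of how the newly exported WatchListPageSize knob is set on a reflector built with NewNamedReflector (illustrative; the reflector name, 30-second resync period and page size of 500 are arbitrary, and lw is any cache.ListerWatcher such as the one sketched earlier).

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// runPodReflector mirrors pods from lw into a plain Store and returns it.
func runPodReflector(lw cache.ListerWatcher, stopCh <-chan struct{}) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	r := cache.NewNamedReflector("pod-reflector", lw, &v1.Pod{}, store, 30*time.Second)
	// Ask the pager inside ListAndWatch for 500-item chunks instead of the default page size.
	r.WatchListPageSize = 500
	go r.Run(stopCh)
	return store
}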
- errorResyncRequested = errors.New("resync channel fired") - // Used to indicate that watching stopped because of a signal from the stop // channel passed in from a client of the reflector. errorStopRequested = errors.New("Stop requested") @@ -166,34 +182,71 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) var resourceVersion string // Explicitly set "0" as resource version - it's fine for the List() // to be served from cache and potentially be delayed relative to // etcd contents. Reflector framework will catch up via Watch() eventually. options := metav1.ListOptions{ResourceVersion: "0"} - r.metrics.numberOfLists.Inc() - start := r.clock.Now() - list, err := r.listerWatcher.List(options) - if err != nil { - return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) - } - r.metrics.listDuration.Observe(time.Since(start).Seconds()) - listMetaInterface, err := meta.ListAccessor(list) - if err != nil { - return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) - } - resourceVersion = listMetaInterface.GetResourceVersion() - items, err := meta.ExtractList(list) - if err != nil { - return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) - } - r.metrics.numberOfItemsInList.Observe(float64(len(items))) - if err := r.syncWith(items, resourceVersion); err != nil { - return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) + + if err := func() error { + initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name}) + defer initTrace.LogIfLong(10 * time.Second) + var list runtime.Object + var err error + listCh := make(chan struct{}, 1) + panicCh := make(chan interface{}, 1) + go func() { + defer func() { + if r := recover(); r != nil { + panicCh <- r + } + }() + // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first + // list request will return the full response. + pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { + return r.listerWatcher.List(opts) + })) + if r.WatchListPageSize != 0 { + pager.PageSize = r.WatchListPageSize + } + // Pager falls back to full list if paginated list calls fail due to an "Expired" error. 
+ list, err = pager.List(context.Background(), options) + close(listCh) + }() + select { + case <-stopCh: + return nil + case r := <-panicCh: + panic(r) + case <-listCh: + } + if err != nil { + return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedTypeName, err) + } + initTrace.Step("Objects listed") + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + initTrace.Step("Resource version extracted") + items, err := meta.ExtractList(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) + } + initTrace.Step("Objects extracted") + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + initTrace.Step("Resource version updated") + return nil + }(); err != nil { + return err } - r.setLastSyncResourceVersion(resourceVersion) resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) @@ -212,7 +265,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return } if r.ShouldResync == nil || r.ShouldResync() { - glog.V(4).Infof("%s: forcing resync", r.name) + klog.V(4).Infof("%s: forcing resync", r.name) if err := r.store.Resync(); err != nil { resyncerrc <- err return @@ -237,37 +290,41 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // We want to avoid situations of hanging watchers. Stop any wachers that do not // receive any events within the timeout window. TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. + // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). + AllowWatchBookmarks: true, } - r.metrics.numberOfWatches.Inc() w, err := r.listerWatcher.Watch(options) if err != nil { switch err { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) default: - utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err)) + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) } // If this is "connection refused" error, it means that most likely apiserver is not responsive. // It doesn't make sense to re-list all objects because most likely we will be able to restart // watch where we ended. // If that's the case wait and resend watch request. 
- if urlError, ok := err.(*url.Error); ok { - if opError, ok := urlError.Err.(*net.OpError); ok { - if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { - time.Sleep(time.Second) - continue - } - } + if utilnet.IsConnectionRefused(err) { + time.Sleep(time.Second) + continue } return nil } if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { - glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + switch { + case apierrs.IsResourceExpired(err): + klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + default: + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + } } return nil } @@ -291,11 +348,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err // Stopping the watcher should be idempotent and if we return from this function there's no way // we're coming back in with the same watch interface. defer w.Stop() - // update metrics - defer func() { - r.metrics.numberOfItemsInWatch.Observe(float64(eventCount)) - r.metrics.watchDuration.Observe(time.Since(start).Seconds()) - }() loop: for { @@ -311,9 +363,17 @@ loop: if event.Type == watch.Error { return apierrs.FromObject(event.Object) } - if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a { - utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) - continue + if r.expectedType != nil { + if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) + continue + } + } + if r.expectedGVK != nil { + if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a)) + continue + } } meta, err := meta.Accessor(event.Object) if err != nil { @@ -340,6 +400,8 @@ loop: if err != nil { utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) } + case watch.Bookmark: + // A `Bookmark` means watch has synced here, just update the resourceVersion default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) } @@ -349,12 +411,11 @@ loop: } } - watchDuration := r.clock.Now().Sub(start) + watchDuration := r.clock.Since(start) if watchDuration < 1*time.Second && eventCount == 0 { - r.metrics.numberOfShortWatches.Inc() return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } - glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) + klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount) return nil } @@ -370,9 +431,4 @@ func (r *Reflector) setLastSyncResourceVersion(v string) { r.lastSyncResourceVersionMutex.Lock() defer r.lastSyncResourceVersionMutex.Unlock() r.lastSyncResourceVersion = v - - rv, err := strconv.Atoi(v) - if err == nil { - r.metrics.lastResourceVersion.Set(float64(rv)) - } } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go index 0945e5c3..5c00115f 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go +++ 
b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -47,19 +47,6 @@ func (noopMetric) Dec() {} func (noopMetric) Observe(float64) {} func (noopMetric) Set(float64) {} -type reflectorMetrics struct { - numberOfLists CounterMetric - listDuration SummaryMetric - numberOfItemsInList SummaryMetric - - numberOfWatches CounterMetric - numberOfShortWatches CounterMetric - watchDuration SummaryMetric - numberOfItemsInWatch SummaryMetric - - lastResourceVersion GaugeMetric -} - // MetricsProvider generates various metrics used by the reflector. type MetricsProvider interface { NewListsMetric(name string) CounterMetric @@ -94,23 +81,6 @@ var metricsFactory = struct { metricsProvider: noopMetricsProvider{}, } -func newReflectorMetrics(name string) *reflectorMetrics { - var ret *reflectorMetrics - if len(name) == 0 { - return ret - } - return &reflectorMetrics{ - numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name), - listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name), - numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name), - numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name), - numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name), - watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name), - numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name), - lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name), - } -} - // SetReflectorMetricsProvider sets the metrics provider func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { metricsFactory.setProviders.Do(func() { diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 5f8c507f..f59a0852 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -25,37 +25,124 @@ import ( "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/buffer" "k8s.io/client-go/util/retry" + "k8s.io/utils/buffer" - "github.com/golang/glog" + "k8s.io/klog" ) -// SharedInformer has a shared data cache and is capable of distributing notifications for changes -// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is -// one behavior change compared to a standard Informer. When you receive a notification, the cache -// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend -// on the contents of the cache exactly matching the notification you've received in handler -// functions. If there was a create, followed by a delete, the cache may NOT have your item. This -// has advantages over the broadcaster since it allows us to share a common cache across many -// controllers. Extending the broadcaster would have required us keep duplicate caches for each -// watch. +// SharedInformer provides eventually consistent linkage of its +// clients to the authoritative state of a given collection of +// objects. An object is identified by its API group, kind/resource, +// namespace, and name; the `ObjectMeta.UID` is not part of an +// object's ID as far as this contract is concerned. One +// SharedInformer provides linkage to objects of a particular API +// group and kind/resource. 
The linked object collection of a +// SharedInformer may be further restricted to one namespace and/or by +// label selector and/or field selector. +// +// The authoritative state of an object is what apiservers provide +// access to, and an object goes through a strict sequence of states. +// An object state is either "absent" or present with a +// ResourceVersion and other appropriate content. +// +// A SharedInformer gets object states from apiservers using a +// sequence of LIST and WATCH operations. Through this sequence the +// apiservers provide a sequence of "collection states" to the +// informer, where each collection state defines the state of every +// object of the collection. No promise --- beyond what is implied by +// other remarks here --- is made about how one informer's sequence of +// collection states relates to a different informer's sequence of +// collection states. +// +// A SharedInformer maintains a local cache, exposed by GetStore() and +// by GetIndexer() in the case of an indexed informer, of the state of +// each relevant object. This cache is eventually consistent with the +// authoritative state. This means that, unless prevented by +// persistent communication problems, if ever a particular object ID X +// is authoritatively associated with a state S then for every +// SharedInformer I whose collection includes (X, S) eventually either +// (1) I's cache associates X with S or a later state of X, (2) I is +// stopped, or (3) the authoritative state service for X terminates. +// To be formally complete, we say that the absent state meets any +// restriction by label selector or field selector. +// +// The local cache starts out empty, and gets populated and updated +// during `Run()`. +// +// As a simple example, if a collection of objects is henceforeth +// unchanging, a SharedInformer is created that links to that +// collection, and that SharedInformer is `Run()` then that +// SharedInformer's cache eventually holds an exact copy of that +// collection (unless it is stopped too soon, the authoritative state +// service ends, or communication problems between the two +// persistently thwart achievement). +// +// As another simple example, if the local cache ever holds a +// non-absent state for some object ID and the object is eventually +// removed from the authoritative state then eventually the object is +// removed from the local cache (unless the SharedInformer is stopped +// too soon, the authoritative state service ends, or communication +// problems persistently thwart the desired result). +// +// The keys in the Store are of the form namespace/name for namespaced +// objects, and are simply the name for non-namespaced objects. +// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for +// a given object, and `SplitMetaNamespaceKey(key)` to split a key +// into its constituent parts. +// +// A client is identified here by a ResourceEventHandler. For every +// update to the SharedInformer's local cache and for every client +// added before `Run()`, eventually either the SharedInformer is +// stopped or the client is notified of the update. A client added +// after `Run()` starts gets a startup batch of notifications of +// additions of the object existing in the cache at the time that +// client was added; also, for every update to the SharedInformer's +// local cache after that client was added, eventually either the +// SharedInformer is stopped or that client is notified of that +// update. 
Client notifications happen after the corresponding cache +// update and, in the case of a SharedIndexInformer, after the +// corresponding index updates. It is possible that additional cache +// and index updates happen before such a prescribed notification. +// For a given SharedInformer and client, the notifications are +// delivered sequentially. For a given SharedInformer, client, and +// object ID, the notifications are delivered in order. +// +// A client must process each notification promptly; a SharedInformer +// is not engineered to deal well with a large backlog of +// notifications to deliver. Lengthy processing should be passed off +// to something else, for example through a +// `client-go/util/workqueue`. +// +// Each query to an informer's local cache --- whether a single-object +// lookup, a list operation, or a use of one of its indices --- is +// answered entirely from one of the collection states received by +// that informer. +// +// A delete notification exposes the last locally known non-absent +// state, except that its ResourceVersion is replaced with a +// ResourceVersion in which the object is actually absent. type SharedInformer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync // period. Events to a single handler are delivered sequentially, but there is no coordination // between different handlers. AddEventHandler(handler ResourceEventHandler) - // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the - // specified resync period. Events to a single handler are delivered sequentially, but there is - // no coordination between different handlers. + // AddEventHandlerWithResyncPeriod adds an event handler to the + // shared informer using the specified resync period. The resync + // operation consists of delivering to the handler a create + // notification for every object in the informer's local cache; it + // does not add any interactions with the authoritative storage. AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) - // GetStore returns the Store. + // GetStore returns the informer's local cache as a Store. GetStore() Store // GetController gives back a synthetic interface that "votes" to start the informer GetController() Controller - // Run starts the shared informer, which will be stopped when stopCh is closed. + // Run starts and runs the shared informer, returning after it stops. + // The informer will be stopped when stopCh is closed. Run(stopCh <-chan struct{}) - // HasSynced returns true if the shared informer's store has synced. + // HasSynced returns true if the shared informer's store has been + // informed by at least one full LIST of the authoritative state + // of the informer's object collection. This is unrelated to "resync". HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -63,6 +150,7 @@ type SharedInformer interface { LastSyncResourceVersion() string } +// SharedIndexInformer provides add and get Indexers ability based on SharedInformer. type SharedIndexInformer interface { SharedInformer // AddIndexers add indexers to the informer before it starts. 
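To ground the contract spelled out above, a minimal SharedIndexInformer consumer (illustrative; lw is any ListerWatcher, and the 30-second resync period and handler bodies are placeholders).

package example

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// runPodInformer starts an informer over lw and blocks until its cache has synced.
func runPodInformer(lw cache.ListerWatcher, stopCh <-chan struct{}) cache.SharedIndexInformer {
	informer := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("added", obj.(*v1.Pod).Name) },
		// DeleteFunc may receive a DeletedFinalStateUnknown, so avoid a blind type assertion here.
		DeleteFunc: func(obj interface{}) { fmt.Println("deleted an object") },
	})

	go informer.Run(stopCh)
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		panic("cache never synced before stopCh closed")
	}
	return informer
}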
@@ -86,7 +174,7 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve resyncCheckPeriod: defaultEventHandlerResyncPeriod, defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)), - clock: realClock, + clock: realClock, } return sharedIndexInformer } @@ -102,10 +190,26 @@ const ( initialBufferSize = 1024 ) +// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages +// indicating that the caller identified by name is waiting for syncs, followed by +// either a successful or failed sync. +func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !WaitForCacheSync(stopCh, cacheSyncs...) { + utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName)) + return false + } + + klog.Infof("Caches are synced for %s ", controllerName) + return true +} + // WaitForCacheSync waits for caches to populate. It returns true if it was successful, false // if the controller should shutdown +// callers should prefer WaitForNamedCacheSync() func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { - err := wait.PollUntil(syncedPollPeriod, + err := wait.PollImmediateUntil(syncedPollPeriod, func() (bool, error) { for _, syncFunc := range cacheSyncs { if !syncFunc() { @@ -116,11 +220,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, stopCh) if err != nil { - glog.V(2).Infof("stop requested") + klog.V(2).Infof("stop requested") return false } - glog.V(4).Infof("caches populated") + klog.V(4).Infof("caches populated") return true } @@ -129,7 +233,7 @@ type sharedIndexInformer struct { controller Controller processor *sharedProcessor - cacheMutationDetector CacheMutationDetector + cacheMutationDetector MutationDetector // This block is tracked to handle late initialization of the controller listerWatcher ListerWatcher @@ -169,7 +273,7 @@ func (v *dummyController) HasSynced() bool { return v.informer.HasSynced() } -func (c *dummyController) LastSyncResourceVersion() string { +func (v *dummyController) LastSyncResourceVersion() string { return "" } @@ -279,11 +383,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration { return desired } if check == 0 { - glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) return 0 } if desired < check { - glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) return check } return desired @@ -296,19 +400,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv defer s.startedLock.Unlock() if s.stopped { - glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) + klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) return } if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { - glog.Warningf("resyncPeriod %d is too small. 
Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { - glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update @@ -555,7 +659,7 @@ func (p *processorListener) run() { case deleteNotification: p.handler.OnDelete(notification.oldObj) default: - utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) + utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next)) } } // the only way to get here is if the p.nextCh is empty and closed diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go index 4958987f..fc844efe 100644 --- a/vendor/k8s.io/client-go/tools/cache/store.go +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -210,7 +210,7 @@ func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) // 'c' takes ownership of the list, you should not reference the list again // after calling this function. func (c *cache) Replace(list []interface{}, resourceVersion string) error { - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) for _, item := range list { key, err := c.keyFunc(item) if err != nil { diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 1c201efb..e7232514 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -148,12 +148,19 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, } index := c.indices[indexName] - // need to de-dupe the return list. Since multiple keys are allowed, this can happen. - returnKeySet := sets.String{} - for _, indexKey := range indexKeys { - set := index[indexKey] - for _, key := range set.UnsortedList() { - returnKeySet.Insert(key) + var returnKeySet sets.String + if len(indexKeys) == 1 { + // In majority of cases, there is exactly one value matching. + // Optimize the most common path - deduping is not needed here. + returnKeySet = index[indexKeys[0]] + } else { + // Need to de-dupe the return list. + // Since multiple keys are allowed, this can happen. 
+ returnKeySet = sets.String{} + for _, indexKey := range indexKeys { + for key := range index[indexKey] { + returnKeySet.Insert(key) + } } } @@ -178,7 +185,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro set := index[indexKey] list := make([]interface{}, 0, set.Len()) - for _, key := range set.List() { + for key := range set { list = append(list, c.items[key]) } @@ -285,6 +292,13 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) { set := index[indexValue] if set != nil { set.Delete(key) + + // If we don't delete the set when zero, indices with high cardinality + // short lived resources can cause memory to increase over time from + // unused empty sets. See `kubernetes/kubernetes/issues/84959`. + if len(set) == 0 { + delete(index, indexValue) + } } } } @@ -295,6 +309,7 @@ func (c *threadSafeMap) Resync() error { return nil } +// NewThreadSafeStore creates a new instance of ThreadSafeStore. func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { return &threadSafeMap{ items: map[string]interface{}{}, diff --git a/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go index 117df46c..220845dd 100644 --- a/vendor/k8s.io/client-go/tools/cache/undelta_store.go +++ b/vendor/k8s.io/client-go/tools/cache/undelta_store.go @@ -31,6 +31,7 @@ type UndeltaStore struct { // Assert that it implements the Store interface. var _ Store = &UndeltaStore{} +// Add inserts an object into the store and sends complete state by calling PushFunc. // Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods. // In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc} // and the List. So, the following can happen, resulting in two identical calls to PushFunc. @@ -41,7 +42,6 @@ var _ Store = &UndeltaStore{} // 3 Store.Add(b) // 4 Store.List() -> [a,b] // 5 Store.List() -> [a,b] - func (u *UndeltaStore) Add(obj interface{}) error { if err := u.Store.Add(obj); err != nil { return err @@ -50,6 +50,7 @@ func (u *UndeltaStore) Add(obj interface{}) error { return nil } +// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc. func (u *UndeltaStore) Update(obj interface{}) error { if err := u.Store.Update(obj); err != nil { return err @@ -58,6 +59,7 @@ func (u *UndeltaStore) Update(obj interface{}) error { return nil } +// Delete removes an item from the cache and sends complete state by calling PushFunc. func (u *UndeltaStore) Delete(obj interface{}) error { if err := u.Store.Delete(obj); err != nil { return err @@ -66,6 +68,10 @@ func (u *UndeltaStore) Delete(obj interface{}) error { return nil } +// Replace will delete the contents of current store, using instead the given list. +// 'u' takes ownership of the list, you should not reference the list again +// after calling this function. +// The new contents complete state will be sent by calling PushFunc after replacement. func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error { if err := u.Store.Replace(list, resourceVersion); err != nil { return err diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 0a081871..5871575a 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -15,4 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package + package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 1391df70..1f1209f8 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -17,6 +17,8 @@ limitations under the License. package api import ( + "fmt" + "k8s.io/apimachinery/pkg/runtime" ) @@ -29,10 +31,12 @@ import ( type Config struct { // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +k8s:conversion-gen=false // +optional Kind string `json:"kind,omitempty"` // Legacy field from pkg/api/types.go TypeMeta. // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +k8s:conversion-gen=false // +optional APIVersion string `json:"apiVersion,omitempty"` // Preferences holds general information to be use for cli interactions @@ -62,6 +66,7 @@ type Preferences struct { // Cluster contains information about how to communicate with a kubernetes cluster type Cluster struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + // +k8s:conversion-gen=false LocationOfOrigin string // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` @@ -82,6 +87,7 @@ type Cluster struct { // AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. type AuthInfo struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + // +k8s:conversion-gen=false LocationOfOrigin string // ClientCertificate is the path to a client cert file for TLS. // +optional @@ -130,6 +136,7 @@ type AuthInfo struct { // Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) type Context struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + // +k8s:conversion-gen=false LocationOfOrigin string // Cluster is the name of the cluster for this context Cluster string `json:"cluster"` @@ -150,6 +157,25 @@ type AuthProviderConfig struct { Config map[string]string `json:"config,omitempty"` } +var _ fmt.Stringer = new(AuthProviderConfig) +var _ fmt.GoStringer = new(AuthProviderConfig) + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// AuthProviderConfig to prevent accidental leaking via logs. +func (c AuthProviderConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of +// AuthProviderConfig to prevent accidental leaking via logs. +func (c AuthProviderConfig) String() string { + cfg := "" + if c.Config != nil { + cfg = "--- REDACTED ---" + } + return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg) +} + // ExecConfig specifies a command to provide client credentials. The command is exec'd // and outputs structured stdout holding credentials. 
// @@ -172,6 +198,29 @@ type ExecConfig struct { APIVersion string `json:"apiVersion,omitempty"` } +var _ fmt.Stringer = new(ExecConfig) +var _ fmt.GoStringer = new(ExecConfig) + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// ExecConfig to prevent accidental leaking via logs. +func (c ExecConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig +// to prevent accidental leaking via logs. +func (c ExecConfig) String() string { + var args []string + if len(c.Args) > 0 { + args = []string{"--- REDACTED ---"} + } + env := "[]ExecEnvVar(nil)" + if len(c.Env) > 0 { + env = "[]ExecEnvVar{--- REDACTED ---}" + } + return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion) +} + // ExecEnvVar is used for setting environment variables when executing an exec-based // credential plugin. type ExecEnvVar struct { diff --git a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS index d9516f8a..44d93f84 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS +++ b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - mikedanese - timothysc @@ -10,4 +12,3 @@ reviewers: - timothysc - ingvagabund - resouer -- goltermann diff --git a/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go b/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go new file mode 100644 index 00000000..b9353729 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go @@ -0,0 +1,69 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "net/http" + "sync" + "time" +) + +// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object. +// It helps deal with the /healthz endpoint being set up prior to the LeaderElection. +// This contains the code needed to act as an adaptor between the leader +// election code the health check code. It allows us to provide health +// status about the leader election. Most specifically about if the leader +// has failed to renew without exiting the process. In that case we should +// report not healthy and rely on the kubelet to take down the process. +type HealthzAdaptor struct { + pointerLock sync.Mutex + le *LeaderElector + timeout time.Duration +} + +// Name returns the name of the health check we are implementing. +func (l *HealthzAdaptor) Name() string { + return "leaderElection" +} + +// Check is called by the healthz endpoint handler. +// It fails (returns an error) if we own the lease but had not been able to renew it. 
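A wiring sketch for the new adaptor (illustrative; the mux, the /healthz path and the 20-second slack are assumptions). RunOrDie calls SetLeaderElection on the configured WatchDog, so the caller only registers the adaptor and exposes Check.

package example

import (
	"net/http"
	"time"

	"k8s.io/client-go/tools/leaderelection"
)

// installLeaderHealthz exposes the leader-election health check on mux.
func installLeaderHealthz(mux *http.ServeMux, lec *leaderelection.LeaderElectionConfig) {
	// Allow 20s beyond lease expiry before the check starts reporting failure.
	adaptor := leaderelection.NewLeaderHealthzAdaptor(20 * time.Second)
	lec.WatchDog = adaptor

	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		if err := adaptor.Check(r); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Write([]byte("ok"))
	})
}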
+func (l *HealthzAdaptor) Check(req *http.Request) error { + l.pointerLock.Lock() + defer l.pointerLock.Unlock() + if l.le == nil { + return nil + } + return l.le.Check(l.timeout) +} + +// SetLeaderElection ties a leader election object to a HealthzAdaptor +func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) { + l.pointerLock.Lock() + defer l.pointerLock.Unlock() + l.le = le +} + +// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election. +// timeout determines the time beyond the lease expiry to be allowed for timeout. +// checks within the timeout period after the lease expires will still return healthy. +func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor { + result := &HealthzAdaptor{ + timeout: timeout, + } + return result +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index 9a357b2a..42fffd45 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -16,12 +16,16 @@ limitations under the License. // Package leaderelection implements leader election of a set of endpoints. // It uses an annotation in the endpoints object to store the record of the -// election state. +// election state. This implementation does not guarantee that only one +// client is acting as a leader (a.k.a. fencing). // -// This implementation does not guarantee that only one client is acting as a -// leader (a.k.a. fencing). A client observes timestamps captured locally to -// infer the state of the leader election. Thus the implementation is tolerant -// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate. +// A client only acts on timestamps captured locally to infer the state of the +// leader election. The client does not consider timestamps in the leader +// election record to be accurate because these timestamps may not have been +// produced by a local clock. The implemention does not depend on their +// accuracy and only uses their change to indicate that another client has +// renewed the leader lease. Thus the implementation is tolerant to arbitrary +// clock skew, but is not tolerant to arbitrary clock skew rate. // // However the level of tolerance to skew rate can be configured by setting // RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a @@ -49,18 +53,19 @@ limitations under the License. 
package leaderelection import ( + "bytes" "context" "fmt" - "reflect" "time" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -84,13 +89,23 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { if lec.RetryPeriod < 1 { return nil, fmt.Errorf("retryPeriod must be greater than zero") } + if lec.Callbacks.OnStartedLeading == nil { + return nil, fmt.Errorf("OnStartedLeading callback must not be nil") + } + if lec.Callbacks.OnStoppedLeading == nil { + return nil, fmt.Errorf("OnStoppedLeading callback must not be nil") + } if lec.Lock == nil { return nil, fmt.Errorf("Lock must not be nil.") } - return &LeaderElector{ - config: lec, - }, nil + le := LeaderElector{ + config: lec, + clock: clock.RealClock{}, + metrics: globalMetricsFactory.newLeaderMetrics(), + } + le.metrics.leaderOff(le.config.Name) + return &le, nil } type LeaderElectionConfig struct { @@ -100,17 +115,45 @@ type LeaderElectionConfig struct { // LeaseDuration is the duration that non-leader candidates will // wait to force acquire leadership. This is measured against time of // last observed ack. + // + // A client needs to wait a full LeaseDuration without observing a change to + // the record before it can attempt to take over. When all clients are + // shutdown and a new set of clients are started with different names against + // the same leader record, they must wait the full LeaseDuration before + // attempting to acquire the lease. Thus LeaseDuration should be as short as + // possible (within your tolerance for clock skew rate) to avoid a possible + // long waits in the scenario. + // + // Core clients default this value to 15 seconds. LeaseDuration time.Duration // RenewDeadline is the duration that the acting master will retry // refreshing leadership before giving up. + // + // Core clients default this value to 10 seconds. RenewDeadline time.Duration // RetryPeriod is the duration the LeaderElector clients should wait // between tries of actions. + // + // Core clients default this value to 2 seconds. RetryPeriod time.Duration // Callbacks are callbacks that are triggered during certain lifecycle // events of the LeaderElector Callbacks LeaderCallbacks + + // WatchDog is the associated health checker + // WatchDog may be null if its not needed/configured. + WatchDog *HealthzAdaptor + + // ReleaseOnCancel should be set true if the lock should be released + // when the run context is cancelled. If you set this to true, you must + // ensure all code guarded by this lease has successfully completed + // prior to cancelling the context, or you may have two processes + // simultaneously acting on the critical path. + ReleaseOnCancel bool + + // Name is the name of the resource lock for debugging + Name string } // LeaderCallbacks are callbacks that are triggered during certain @@ -133,12 +176,21 @@ type LeaderCallbacks struct { type LeaderElector struct { config LeaderElectionConfig // internal bookkeeping - observedRecord rl.LeaderElectionRecord - observedTime time.Time + observedRecord rl.LeaderElectionRecord + observedRawRecord []byte + observedTime time.Time // used to implement OnNewLeader(), may lag slightly from the // value observedRecord.HolderIdentity if the transition has // not yet been reported. 
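For orientation, the following is a minimal sketch (not part of the patch) of how a caller could wire together the LeaderElectionConfig fields added here, including the WatchDog health adaptor and ReleaseOnCancel. The namespace, lock name, and identity are placeholders, and the durations simply echo the core-client defaults noted in the field comments.

package main

import (
	"context"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// The adaptor can be registered on a /healthz handler before the elector
	// exists; RunOrDie ties it to the elector via LeaderElectionConfig.WatchDog.
	watchdog := leaderelection.NewLeaderHealthzAdaptor(20 * time.Second)

	id, _ := os.Hostname() // placeholder identity; must be unique per participant

	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "example-lock"},
		Client:    client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: id, // EventRecorder is optional and omitted here
		},
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		LeaseDuration:   15 * time.Second, // core-client default
		RenewDeadline:   10 * time.Second, // core-client default
		RetryPeriod:     2 * time.Second,  // core-client default
		WatchDog:        watchdog,
		ReleaseOnCancel: true, // only safe if leader work finishes before cancel()
		Name:            "example-lock",
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				klog.Info("started leading")
				<-ctx.Done() // leader-only work goes here
			},
			OnStoppedLeading: func() {
				klog.Info("stopped leading")
			},
			OnNewLeader: func(identity string) {
				klog.Infof("current leader: %s", identity)
			},
		},
	})
}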
reportedLeader string + + // clock is wrapper around time to allow for less flaky testing + clock clock.Clock + + metrics leaderMetricsAdapter + + // name is the name of the resource lock for debugging + name string } // Run starts the leader election loop @@ -163,6 +215,9 @@ func RunOrDie(ctx context.Context, lec LeaderElectionConfig) { if err != nil { panic(err) } + if lec.WatchDog != nil { + lec.WatchDog.SetLeaderElection(le) + } le.Run(ctx) } @@ -184,16 +239,17 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { defer cancel() succeeded := false desc := le.config.Lock.Describe() - glog.Infof("attempting to acquire leader lease %v...", desc) + klog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { succeeded = le.tryAcquireOrRenew() le.maybeReportTransition() if !succeeded { - glog.V(4).Infof("failed to acquire lease %v", desc) + klog.V(4).Infof("failed to acquire lease %v", desc) return } le.config.Lock.RecordEvent("became leader") - glog.Infof("successfully acquired lease %v", desc) + le.metrics.leaderOn(le.config.Name) + klog.Infof("successfully acquired lease %v", desc) cancel() }, le.config.RetryPeriod, JitterFactor, true, ctx.Done()) return succeeded @@ -224,13 +280,36 @@ func (le *LeaderElector) renew(ctx context.Context) { le.maybeReportTransition() desc := le.config.Lock.Describe() if err == nil { - glog.V(4).Infof("successfully renewed lease %v", desc) + klog.V(5).Infof("successfully renewed lease %v", desc) return } le.config.Lock.RecordEvent("stopped leading") - glog.Infof("failed to renew lease %v: %v", desc, err) + le.metrics.leaderOff(le.config.Name) + klog.Infof("failed to renew lease %v: %v", desc, err) cancel() }, le.config.RetryPeriod, ctx.Done()) + + // if we hold the lease, give it up + if le.config.ReleaseOnCancel { + le.release() + } +} + +// release attempts to release the leader lease if we have acquired it. +func (le *LeaderElector) release() bool { + if !le.IsLeader() { + return true + } + leaderElectionRecord := rl.LeaderElectionRecord{ + LeaderTransitions: le.observedRecord.LeaderTransitions, + } + if err := le.config.Lock.Update(leaderElectionRecord); err != nil { + klog.Errorf("Failed to release lock: %v", err) + return false + } + le.observedRecord = leaderElectionRecord + le.observedTime = le.clock.Now() + return true } // tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired, @@ -246,29 +325,31 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { } // 1. obtain or create the ElectionRecord - oldLeaderElectionRecord, err := le.config.Lock.Get() + oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get() if err != nil { if !errors.IsNotFound(err) { - glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) + klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) return false } if err = le.config.Lock.Create(leaderElectionRecord); err != nil { - glog.Errorf("error initially creating leader election record: %v", err) + klog.Errorf("error initially creating leader election record: %v", err) return false } le.observedRecord = leaderElectionRecord - le.observedTime = time.Now() + le.observedTime = le.clock.Now() return true } // 2. 
Record obtained, check the Identity & Time - if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) { + if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) { le.observedRecord = *oldLeaderElectionRecord - le.observedTime = time.Now() + le.observedRawRecord = oldLeaderElectionRawRecord + le.observedTime = le.clock.Now() } - if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && + if len(oldLeaderElectionRecord.HolderIdentity) > 0 && + le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && !le.IsLeader() { - glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) + klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false } @@ -283,11 +364,12 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { // update the lock itself if err = le.config.Lock.Update(leaderElectionRecord); err != nil { - glog.Errorf("Failed to update lock: %v", err) + klog.Errorf("Failed to update lock: %v", err) return false } + le.observedRecord = leaderElectionRecord - le.observedTime = time.Now() + le.observedTime = le.clock.Now() return true } @@ -300,3 +382,19 @@ func (le *LeaderElector) maybeReportTransition() { go le.config.Callbacks.OnNewLeader(le.reportedLeader) } } + +// Check will determine if the current lease is expired by more than timeout. +func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error { + if !le.IsLeader() { + // Currently not concerned with the case that we are hot standby + return nil + } + // If we are more than timeout seconds after the lease duration that is past the timeout + // on the lease renew. Time to start reporting ourselves as unhealthy. We should have + // died but conditions like deadlock can prevent this. (See #70819) + if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease { + return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name) + } + + return nil +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/metrics.go b/vendor/k8s.io/client-go/tools/leaderelection/metrics.go new file mode 100644 index 00000000..65917bf8 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/metrics.go @@ -0,0 +1,109 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "sync" +) + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +type leaderMetricsAdapter interface { + leaderOn(name string) + leaderOff(name string) +} + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type SwitchMetric interface { + On(name string) + Off(name string) +} + +type noopMetric struct{} + +func (noopMetric) On(name string) {} +func (noopMetric) Off(name string) {} + +// defaultLeaderMetrics expects the caller to lock before setting any metrics. 
+type defaultLeaderMetrics struct { + // leader's value indicates if the current process is the owner of name lease + leader SwitchMetric +} + +func (m *defaultLeaderMetrics) leaderOn(name string) { + if m == nil { + return + } + m.leader.On(name) +} + +func (m *defaultLeaderMetrics) leaderOff(name string) { + if m == nil { + return + } + m.leader.Off(name) +} + +type noMetrics struct{} + +func (noMetrics) leaderOn(name string) {} +func (noMetrics) leaderOff(name string) {} + +// MetricsProvider generates various metrics used by the leader election. +type MetricsProvider interface { + NewLeaderMetric() SwitchMetric +} + +type noopMetricsProvider struct{} + +func (_ noopMetricsProvider) NewLeaderMetric() SwitchMetric { + return noopMetric{} +} + +var globalMetricsFactory = leaderMetricsFactory{ + metricsProvider: noopMetricsProvider{}, +} + +type leaderMetricsFactory struct { + metricsProvider MetricsProvider + + onlyOnce sync.Once +} + +func (f *leaderMetricsFactory) setProvider(mp MetricsProvider) { + f.onlyOnce.Do(func() { + f.metricsProvider = mp + }) +} + +func (f *leaderMetricsFactory) newLeaderMetrics() leaderMetricsAdapter { + mp := f.metricsProvider + if mp == (noopMetricsProvider{}) { + return noMetrics{} + } + return &defaultLeaderMetrics{ + leader: mp.NewLeaderMetric(), + } +} + +// SetProvider sets the metrics provider for all subsequently created work +// queues. Only the first call has an effect. +func SetProvider(metricsProvider MetricsProvider) { + globalMetricsFactory.setProvider(metricsProvider) +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index 4ff59560..fd152b07 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -41,22 +41,23 @@ type ConfigMapLock struct { } // Get returns the election record from a ConfigMap Annotation -func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, error) { +func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, []byte, error) { var record LeaderElectionRecord var err error cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{}) if err != nil { - return nil, err + return nil, nil, err } if cml.cm.Annotations == nil { cml.cm.Annotations = make(map[string]string) } - if recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]; found { + recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey] + if found { if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { - return nil, err + return nil, nil, err } } - return &record, nil + return &record, []byte(recordBytes), nil } // Create attempts to create a LeaderElectionRecord annotation @@ -80,7 +81,7 @@ func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error { // Update will update an existing annotation on a given resource. 
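The metrics types added in metrics.go above only define hooks; nothing is recorded unless a provider is registered with SetProvider before the elector is built. A rough, assumed sketch of a provider that merely logs leadership transitions (the logSwitchMetric and logMetricsProvider names are invented for illustration):

package main

import (
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/klog"
)

// logSwitchMetric is a hypothetical SwitchMetric that logs leadership changes.
type logSwitchMetric struct{}

func (logSwitchMetric) On(name string)  { klog.Infof("leaderelection: now leading %q", name) }
func (logSwitchMetric) Off(name string) { klog.Infof("leaderelection: not leading %q", name) }

// logMetricsProvider satisfies leaderelection.MetricsProvider.
type logMetricsProvider struct{}

func (logMetricsProvider) NewLeaderMetric() leaderelection.SwitchMetric {
	return logSwitchMetric{}
}

func main() {
	// Only the first call takes effect; it must run before NewLeaderElector or
	// RunOrDie so that newLeaderMetrics sees the provider.
	leaderelection.SetProvider(logMetricsProvider{})
}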
func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { if cml.cm == nil { - return errors.New("endpoint not initialized, call get or create first") + return errors.New("configmap not initialized, call get or create first") } recordBytes, err := json.Marshal(ler) if err != nil { @@ -93,6 +94,9 @@ func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { // RecordEvent in leader election while adding meta-data func (cml *ConfigMapLock) RecordEvent(s string) { + if cml.LockConfig.EventRecorder == nil { + return + } events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s) cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events) } @@ -103,7 +107,7 @@ func (cml *ConfigMapLock) Describe() string { return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name) } -// returns the Identity of the lock +// Identity returns the Identity of the lock func (cml *ConfigMapLock) Identity() string { return cml.LockConfig.Identity } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go index 6f7dcfb0..f5a8ffcc 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go @@ -36,22 +36,23 @@ type EndpointsLock struct { } // Get returns the election record from a Endpoints Annotation -func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) { +func (el *EndpointsLock) Get() (*LeaderElectionRecord, []byte, error) { var record LeaderElectionRecord var err error el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{}) if err != nil { - return nil, err + return nil, nil, err } if el.e.Annotations == nil { el.e.Annotations = make(map[string]string) } - if recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]; found { + recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey] + if found { if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { - return nil, err + return nil, nil, err } } - return &record, nil + return &record, []byte(recordBytes), nil } // Create attempts to create a LeaderElectionRecord annotation @@ -81,6 +82,9 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error { if err != nil { return err } + if el.e.Annotations == nil { + el.e.Annotations = make(map[string]string) + } el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e) return err @@ -88,6 +92,9 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error { // RecordEvent in leader election while adding meta-data func (el *EndpointsLock) RecordEvent(s string) { + if el.LockConfig.EventRecorder == nil { + return + } events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s) el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events) } @@ -98,7 +105,7 @@ func (el *EndpointsLock) Describe() string { return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name) } -// returns the Identity of the lock +// Identity returns the Identity of the lock func (el *EndpointsLock) Identity() string { return el.LockConfig.Identity } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go 
b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 676fd1d7..c9f17591 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -20,14 +20,18 @@ import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/record" ) const ( LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader" EndpointsResourceLock = "endpoints" ConfigMapsResourceLock = "configmaps" + LeasesResourceLock = "leases" + EndpointsLeasesResourceLock = "endpointsleases" + ConfigMapsLeasesResourceLock = "configmapsleases" ) // LeaderElectionRecord is the record that is stored in the leader election annotation. @@ -35,6 +39,11 @@ const ( // with a random string (e.g. UUID) with only slight modification of this code. // TODO(mikedanese): this should potentially be versioned type LeaderElectionRecord struct { + // HolderIdentity is the ID that owns the lease. If empty, no one owns this lease and + // all callers may acquire. Versions of this library prior to Kubernetes 1.14 will not + // attempt to acquire leases with empty identities and will wait for the full lease + // interval to expire before attempting to reacquire. This value is set to empty when + // a client voluntarily steps down. HolderIdentity string `json:"holderIdentity"` LeaseDurationSeconds int `json:"leaseDurationSeconds"` AcquireTime metav1.Time `json:"acquireTime"` @@ -42,11 +51,19 @@ type LeaderElectionRecord struct { LeaderTransitions int `json:"leaderTransitions"` } +// EventRecorder records a change in the ResourceLock. +type EventRecorder interface { + Eventf(obj runtime.Object, eventType, reason, message string, args ...interface{}) +} + // ResourceLockConfig common data that exists across different // resource locks type ResourceLockConfig struct { - Identity string - EventRecorder record.EventRecorder + // Identity is the unique string identifying a lease holder across + // all participants in an election. + Identity string + // EventRecorder is optional. + EventRecorder EventRecorder } // Interface offers a common interface for locking on arbitrary @@ -56,7 +73,7 @@ type ResourceLockConfig struct { // by the leaderelection code. 
type Interface interface { // Get returns the LeaderElectionRecord - Get() (*LeaderElectionRecord, error) + Get() (*LeaderElectionRecord, []byte, error) // Create attempts to create a LeaderElectionRecord Create(ler LeaderElectionRecord) error @@ -76,25 +93,47 @@ type Interface interface { } // Manufacture will create a lock of a given type according to the input parameters -func New(lockType string, ns string, name string, client corev1.CoreV1Interface, rlc ResourceLockConfig) (Interface, error) { +func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) { + endpointsLock := &EndpointsLock{ + EndpointsMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Client: coreClient, + LockConfig: rlc, + } + configmapLock := &ConfigMapLock{ + ConfigMapMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Client: coreClient, + LockConfig: rlc, + } + leaseLock := &LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Client: coordinationClient, + LockConfig: rlc, + } switch lockType { case EndpointsResourceLock: - return &EndpointsLock{ - EndpointsMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: client, - LockConfig: rlc, - }, nil + return endpointsLock, nil case ConfigMapsResourceLock: - return &ConfigMapLock{ - ConfigMapMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: client, - LockConfig: rlc, + return configmapLock, nil + case LeasesResourceLock: + return leaseLock, nil + case EndpointsLeasesResourceLock: + return &MultiLock{ + Primary: endpointsLock, + Secondary: leaseLock, + }, nil + case ConfigMapsLeasesResourceLock: + return &MultiLock{ + Primary: configmapLock, + Secondary: leaseLock, }, nil default: return nil, fmt.Errorf("Invalid lock-type %s", lockType) diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go new file mode 100644 index 00000000..74016b8d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcelock + +import ( + "encoding/json" + "errors" + "fmt" + + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1" +) + +type LeaseLock struct { + // LeaseMeta should contain a Name and a Namespace of a + // LeaseMeta object that the LeaderElector will attempt to lead. 
+ LeaseMeta metav1.ObjectMeta + Client coordinationv1client.LeasesGetter + LockConfig ResourceLockConfig + lease *coordinationv1.Lease +} + +// Get returns the election record from a Lease spec +func (ll *LeaseLock) Get() (*LeaderElectionRecord, []byte, error) { + var err error + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ll.LeaseMeta.Name, metav1.GetOptions{}) + if err != nil { + return nil, nil, err + } + record := LeaseSpecToLeaderElectionRecord(&ll.lease.Spec) + recordByte, err := json.Marshal(*record) + if err != nil { + return nil, nil, err + } + return record, recordByte, nil +} + +// Create attempts to create a Lease +func (ll *LeaseLock) Create(ler LeaderElectionRecord) error { + var err error + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(&coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: ll.LeaseMeta.Name, + Namespace: ll.LeaseMeta.Namespace, + }, + Spec: LeaderElectionRecordToLeaseSpec(&ler), + }) + return err +} + +// Update will update an existing Lease spec. +func (ll *LeaseLock) Update(ler LeaderElectionRecord) error { + if ll.lease == nil { + return errors.New("lease not initialized, call get or create first") + } + ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler) + var err error + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ll.lease) + return err +} + +// RecordEvent in leader election while adding meta-data +func (ll *LeaseLock) RecordEvent(s string) { + if ll.LockConfig.EventRecorder == nil { + return + } + events := fmt.Sprintf("%v %v", ll.LockConfig.Identity, s) + ll.LockConfig.EventRecorder.Eventf(&coordinationv1.Lease{ObjectMeta: ll.lease.ObjectMeta}, corev1.EventTypeNormal, "LeaderElection", events) +} + +// Describe is used to convert details on current resource lock +// into a string +func (ll *LeaseLock) Describe() string { + return fmt.Sprintf("%v/%v", ll.LeaseMeta.Namespace, ll.LeaseMeta.Name) +} + +// Identity returns the Identity of the lock +func (ll *LeaseLock) Identity() string { + return ll.LockConfig.Identity +} + +func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElectionRecord { + var r LeaderElectionRecord + if spec.HolderIdentity != nil { + r.HolderIdentity = *spec.HolderIdentity + } + if spec.LeaseDurationSeconds != nil { + r.LeaseDurationSeconds = int(*spec.LeaseDurationSeconds) + } + if spec.LeaseTransitions != nil { + r.LeaderTransitions = int(*spec.LeaseTransitions) + } + if spec.AcquireTime != nil { + r.AcquireTime = metav1.Time{spec.AcquireTime.Time} + } + if spec.RenewTime != nil { + r.RenewTime = metav1.Time{spec.RenewTime.Time} + } + return &r + +} + +func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec { + leaseDurationSeconds := int32(ler.LeaseDurationSeconds) + leaseTransitions := int32(ler.LeaderTransitions) + return coordinationv1.LeaseSpec{ + HolderIdentity: &ler.HolderIdentity, + LeaseDurationSeconds: &leaseDurationSeconds, + AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time}, + RenewTime: &metav1.MicroTime{ler.RenewTime.Time}, + LeaseTransitions: &leaseTransitions, + } +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go new file mode 100644 index 00000000..8cb89dc4 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcelock + +import ( + "bytes" + "encoding/json" + + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +const ( + UnknownLeader = "leaderelection.k8s.io/unknown" +) + +// MultiLock is used for lock's migration +type MultiLock struct { + Primary Interface + Secondary Interface +} + +// Get returns the older election record of the lock +func (ml *MultiLock) Get() (*LeaderElectionRecord, []byte, error) { + primary, primaryRaw, err := ml.Primary.Get() + if err != nil { + return nil, nil, err + } + + secondary, secondaryRaw, err := ml.Secondary.Get() + if err != nil { + // Lock is held by old client + if apierrors.IsNotFound(err) && primary.HolderIdentity != ml.Identity() { + return primary, primaryRaw, nil + } + return nil, nil, err + } + + if primary.HolderIdentity != secondary.HolderIdentity { + primary.HolderIdentity = UnknownLeader + primaryRaw, err = json.Marshal(primary) + if err != nil { + return nil, nil, err + } + } + return primary, ConcatRawRecord(primaryRaw, secondaryRaw), nil +} + +// Create attempts to create both primary lock and secondary lock +func (ml *MultiLock) Create(ler LeaderElectionRecord) error { + err := ml.Primary.Create(ler) + if err != nil && !apierrors.IsAlreadyExists(err) { + return err + } + return ml.Secondary.Create(ler) +} + +// Update will update and existing annotation on both two resources. 
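The MultiLock above, together with the endpointsleases and configmapsleases lock types added to the New factory earlier in this file, supports migrating an existing deployment from annotation-based locks to Lease objects without two leaders appearing mid-rollout. A hedged sketch of requesting such a migration lock, with placeholder namespace, name, and identity:

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// "endpointsleases" keeps writing the legacy Endpoints annotation (primary)
	// while also maintaining a coordination.k8s.io/v1 Lease (secondary). Once
	// every participant runs a Lease-aware build, the lock type can be switched
	// to plain "leases".
	lock, err := resourcelock.New(
		resourcelock.EndpointsLeasesResourceLock,
		"kube-system",  // placeholder namespace
		"example-lock", // placeholder lock name
		client.CoreV1(),
		client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: "example-holder"},
	)
	if err != nil {
		klog.Fatal(err)
	}
	klog.Infof("using resource lock %s", lock.Describe())
}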
+func (ml *MultiLock) Update(ler LeaderElectionRecord) error { + err := ml.Primary.Update(ler) + if err != nil { + return err + } + _, _, err = ml.Secondary.Get() + if err != nil && apierrors.IsNotFound(err) { + return ml.Secondary.Create(ler) + } + return ml.Secondary.Update(ler) +} + +// RecordEvent in leader election while adding meta-data +func (ml *MultiLock) RecordEvent(s string) { + ml.Primary.RecordEvent(s) + ml.Secondary.RecordEvent(s) +} + +// Describe is used to convert details on current resource lock +// into a string +func (ml *MultiLock) Describe() string { + return ml.Primary.Describe() +} + +// Identity returns the Identity of the lock +func (ml *MultiLock) Identity() string { + return ml.Primary.Identity() +} + +func ConcatRawRecord(primaryRaw, secondaryRaw []byte) []byte { + return bytes.Join([][]byte{primaryRaw, secondaryRaw}, []byte(",")) +} diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS index ff517980..ad52d0c5 100644 --- a/vendor/k8s.io/client-go/tools/metrics/OWNERS +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -1,7 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - wojtek-t - eparis - krousey - jayunit100 -- fgrzadkowski -- tmrts diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go index 74ea3586..307808be 100644 --- a/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -25,9 +25,11 @@ import ( metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) const defaultPageSize = 500 +const defaultPageBufferSize = 10 // ListPageFunc returns a list object for the given list options. type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) @@ -48,6 +50,9 @@ type ListPager struct { PageFn ListPageFunc FullListIfExpired bool + + // Number of pages to buffer + PageBufferSize int32 } // New creates a new pager from the provided pager function using the default @@ -58,6 +63,7 @@ func New(fn ListPageFunc) *ListPager { PageSize: defaultPageSize, PageFn: fn, FullListIfExpired: true, + PageBufferSize: defaultPageBufferSize, } } @@ -71,16 +77,29 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti if options.Limit == 0 { options.Limit = p.PageSize } + requestedResourceVersion := options.ResourceVersion var list *metainternalversion.List for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + obj, err := p.PageFn(ctx, options) if err != nil { - if !errors.IsResourceExpired(err) || !p.FullListIfExpired { + // Only fallback to full list if an "Expired" errors is returned, FullListIfExpired is true, and + // the "Expired" error occurred in page 2 or later (since full list is intended to prevent a pager.List from + // failing when the resource versions is established by the first page request falls out of the compaction + // during the subsequent list requests). + if !errors.IsResourceExpired(err) || !p.FullListIfExpired || options.Continue == "" { return nil, err } - // the list expired while we were processing, fall back to a full list + // the list expired while we were processing, fall back to a full list at + // the requested ResourceVersion. 
options.Limit = 0 options.Continue = "" + options.ResourceVersion = requestedResourceVersion return p.PageFn(ctx, options) } m, err := meta.ListAccessor(obj) @@ -113,5 +132,111 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti // set the next loop up options.Continue = m.GetContinue() + // Clear the ResourceVersion on the subsequent List calls to avoid the + // `specifying resource version is not allowed when using continue` error. + // See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143. + options.ResourceVersion = "" + } +} + +// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If +// fn returns an error, processing stops and that error is returned. If fn does not return an error, +// any error encountered while retrieving the list from the server is returned. If the context +// cancels or times out, the context error is returned. Since the list is retrieved in paginated +// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list +// requests exceed the expiration limit of the apiserver being called. +// +// Items are retrieved in chunks from the server to reduce the impact on the server with up to +// ListPager.PageBufferSize chunks buffered concurrently in the background. +func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItem(obj, fn) + }) +} + +// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on +// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does +// not return an error, any error encountered while retrieving the list from the server is +// returned. If the context cancels or times out, the context error is returned. Since the list is +// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if +// the pagination list requests exceed the expiration limit of the apiserver being called. +// +// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background. +func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if p.PageBufferSize < 0 { + return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize) + } + + // Ensure background goroutine is stopped if this call exits before all list items are + // processed. Cancelation error from this deferred cancel call is never returned to caller; + // either the list result has already been sent to bgResultC or the fn error is returned and + // the cancelation error is discarded. 
+ ctx, cancel := context.WithCancel(ctx) + defer cancel() + + chunkC := make(chan runtime.Object, p.PageBufferSize) + bgResultC := make(chan error, 1) + go func() { + defer utilruntime.HandleCrash() + + var err error + defer func() { + close(chunkC) + bgResultC <- err + }() + err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error { + select { + case chunkC <- chunk: // buffer the chunk, this can block + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }() + + for o := range chunkC { + err := fn(o) + if err != nil { + return err // any fn error should be returned immediately + } + } + // promote the results of our background goroutine to the foreground + return <-bgResultC +} + +// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list +// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return +// an error, any error encountered while retrieving the list from the server is returned. If the +// context cancels or times out, the context error is returned. Since the list is retrieved in +// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the +// pagination list requests exceed the expiration limit of the apiserver being called. +func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if options.Limit == 0 { + options.Limit = p.PageSize + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + obj, err := p.PageFn(ctx, options) + if err != nil { + return err + } + m, err := meta.ListAccessor(obj) + if err != nil { + return fmt.Errorf("returned object must be a list: %v", err) + } + if err := fn(obj); err != nil { + return err + } + // if we have no more items, return. + if len(m.GetContinue()) == 0 { + return nil + } + // set the next loop up + options.Continue = m.GetContinue() } } diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS index 4dd54bbc..6ce73bb5 100644 --- a/vendor/k8s.io/client-go/tools/record/OWNERS +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - lavalamp - smarterclayton diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go index 657ddecb..33d5fe78 100644 --- a/vendor/k8s.io/client-go/tools/record/doc.go +++ b/vendor/k8s.io/client-go/tools/record/doc.go @@ -14,5 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package record has all client logic for recording and reporting events. +// Package record has all client logic for recording and reporting +// "k8s.io/api/core/v1".Event events. 
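To illustrate the new buffered iteration path in the pager package, here is a hedged sketch of streaming pods through EachListItem. The namespace and page sizes are arbitrary, and the List call uses the pre-context signature matching the client-go version vendored by this patch.

package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/pager"
	"k8s.io/klog"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return client.CoreV1().Pods("default").List(opts)
	})
	p.PageSize = 100     // items requested per chunk
	p.PageBufferSize = 5 // chunks buffered ahead of the consumer

	err = p.EachListItem(context.Background(), metav1.ListOptions{}, func(obj runtime.Object) error {
		pod := obj.(*v1.Pod)
		klog.Infof("pod %s/%s", pod.Namespace, pod.Name)
		return nil
	})
	if err != nil {
		klog.Fatal(err)
	}
}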
package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 168dfa80..66f8bd63 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -21,7 +21,7 @@ import ( "math/rand" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -29,11 +29,9 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record/util" ref "k8s.io/client-go/tools/reference" - - "net/http" - - "github.com/golang/glog" + "k8s.io/klog" ) const maxTriesPerEvent = 12 @@ -52,6 +50,40 @@ type EventSink interface { Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) } +// CorrelatorOptions allows you to change the default of the EventSourceObjectSpamFilter +// and EventAggregator in EventCorrelator +type CorrelatorOptions struct { + // The lru cache size used for both EventSourceObjectSpamFilter and the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the LRUCacheSize has to be greater than 0. + LRUCacheSize int + // The burst size used by the token bucket rate filtering in EventSourceObjectSpamFilter + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the BurstSize has to be greater than 0. + BurstSize int + // The fill rate of the token bucket in queries per second in EventSourceObjectSpamFilter + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the QPS has to be greater than 0. + QPS float32 + // The func used by the EventAggregator to group event keys for aggregation + // If not specified (zero value), EventAggregatorByReasonFunc will be used + KeyFunc EventAggregatorKeyFunc + // The func used by the EventAggregator to produced aggregated message + // If not specified (zero value), EventAggregatorByReasonMessageFunc will be used + MessageFunc EventAggregatorMessageFunc + // The number of events in an interval before aggregation happens by the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the MaxEvents has to be greater than 0 + MaxEvents int + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it is considered new by the EventAggregator + // If not specified (zero value), the default specified in events_cache.go will be picked + // This means that the MaxIntervalInSeconds has to be greater than 0 + MaxIntervalInSeconds int + // The clock used by the EventAggregator to allow for testing + // If not specified (zero value), clock.RealClock{} will be used + Clock clock.Clock +} + // EventRecorder knows how to record events on behalf of an EventSource. type EventRecorder interface { // Event constructs an event from the given information and puts it in the queue for sending. @@ -95,37 +127,75 @@ type EventBroadcaster interface { // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster // with the event source set to the given event source. 
NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder + + // Shutdown shuts down the broadcaster + Shutdown() +} + +// EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder +// implementing the new "k8s.io/client-go/tools/events".EventRecorder interface. +type EventRecorderAdapter struct { + recorder EventRecorder +} + +// NewEventRecorderAdapter returns an adapter implementing the new +// "k8s.io/client-go/tools/events".EventRecorder interface. +func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter { + return &EventRecorderAdapter{ + recorder: recorder, + } +} + +// Eventf is a wrapper around v1 Eventf +func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + a.recorder.Eventf(regarding, eventtype, reason, note, args...) } // Creates a new event broadcaster. func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: defaultSleepDuration, + } } func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: sleepDuration, + } +} + +func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { + return &eventBroadcasterImpl{ + Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: defaultSleepDuration, + options: options, + } } type eventBroadcasterImpl struct { *watch.Broadcaster sleepDuration time.Duration + options CorrelatorOptions } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. // The return value can be ignored or used to stop recording, if desired. // TODO: make me an object with parameterizable queue length and retry interval -func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { - // The default math/rand package functions aren't thread safe, so create a - // new Rand object for each StartRecording call. - randGen := rand.New(rand.NewSource(time.Now().UnixNano())) - eventCorrelator := NewEventCorrelator(clock.RealClock{}) - return eventBroadcaster.StartEventWatcher( +func (e *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + eventCorrelator := NewEventCorrelatorWithOptions(e.options) + return e.StartEventWatcher( func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + recordToSink(sink, event, eventCorrelator, e.sleepDuration) }) } -func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { +func (e *eventBroadcasterImpl) Shutdown() { + e.Broadcaster.Shutdown() +} + +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { // Make a copy before modification, because there could be multiple listeners. // Events are safe to copy like this. 
eventCopy := *event @@ -144,29 +214,19 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela } tries++ if tries >= maxTriesPerEvent { - glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) break } // Randomize the first sleep so that various clients won't all be // synced up if the master goes down. if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64())) } else { time.Sleep(sleepDuration) } } } -func isKeyNotFoundError(err error) bool { - statusErr, _ := err.(*errors.StatusError) - - if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { - return true - } - - return false -} - // recordEvent attempts to write event to a sink. It returns true if the event // was successfully recorded or discarded, false if it should be retried. // If updateExistingEvent is false, it creates a new event, otherwise it updates @@ -178,7 +238,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv newEvent, err = sink.Patch(event, patch) } // Update can fail because the event may have been removed and it no longer exists. - if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { + if !updateExistingEvent || (updateExistingEvent && util.IsKeyNotFoundError(err)) { // Making sure that ResourceVersion is empty on creation event.ResourceVersion = "" newEvent, err = sink.Create(event) @@ -194,13 +254,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) return true case *errors.StatusError: if errors.IsAlreadyExists(err) { - glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) } else { - glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) } return true case *errors.UnexpectedObjectError: @@ -209,14 +269,14 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv default: // This case includes actual http transport errors. Go ahead and retry. } - glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) return false } // StartLogging starts sending events received from this EventBroadcaster to the given logging function. // The return value can be ignored or used to stop recording, if desired. 
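As a usage sketch for the broadcaster changes above (correlator tuning plus the new Shutdown method), something like the following could be assembled; the spam-filter numbers and component name are arbitrary examples, not values taken from this patch.

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Tune the correlator instead of taking the defaults from events_cache.go.
	broadcaster := record.NewBroadcasterWithCorrelatorOptions(record.CorrelatorOptions{
		BurstSize: 50, // arbitrary values for illustration
		QPS:       0.5,
	})
	defer broadcaster.Shutdown()

	stopLogging := broadcaster.StartLogging(klog.Infof)
	defer stopLogging.Stop()

	sink := &typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")}
	stopRecording := broadcaster.StartRecordingToSink(sink)
	defer stopRecording.Stop()

	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	_ = recorder // call recorder.Eventf(obj, v1.EventTypeNormal, "Reason", "msg %s", "args") against a real object
}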
-func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { - return eventBroadcaster.StartEventWatcher( +func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return e.StartEventWatcher( func(e *v1.Event) { logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) }) @@ -224,8 +284,8 @@ func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format stri // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. // The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { - watcher := eventBroadcaster.Watch() +func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := e.Watch() go func() { defer utilruntime.HandleCrash() for watchEvent := range watcher.ResultChan() { @@ -242,8 +302,8 @@ func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler fun } // NewRecorder returns an EventRecorder that records events with the given event source. -func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { - return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}} } type recorderImpl struct { @@ -256,12 +316,12 @@ type recorderImpl struct { func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { ref, err := ref.GetReference(recorder.scheme, object) if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) return } - if !validateEventType(eventtype) { - glog.Errorf("Unsupported event type: '%v'", eventtype) + if !util.ValidateEventType(eventtype) { + klog.Errorf("Unsupported event type: '%v'", eventtype) return } @@ -275,14 +335,6 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m }() } -func validateEventType(eventtype string) bool { - switch eventtype { - case v1.EventTypeNormal, v1.EventTypeWarning: - return true - } - return false -} - func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) } diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go index a42084f3..1b499efd 100644 --- a/vendor/k8s.io/client-go/tools/record/events_cache.go +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -443,6 +443,52 @@ func NewEventCorrelator(clock clock.Clock) *EventCorrelator { } } +func NewEventCorrelatorWithOptions(options CorrelatorOptions) *EventCorrelator { + optionsWithDefaults := populateDefaults(options) + spamFilter := NewEventSourceObjectSpamFilter(optionsWithDefaults.LRUCacheSize, + optionsWithDefaults.BurstSize, optionsWithDefaults.QPS, optionsWithDefaults.Clock) + return &EventCorrelator{ + filterFunc: spamFilter.Filter, + aggregator: NewEventAggregator( + optionsWithDefaults.LRUCacheSize, + optionsWithDefaults.KeyFunc, + optionsWithDefaults.MessageFunc, + optionsWithDefaults.MaxEvents, + optionsWithDefaults.MaxIntervalInSeconds, + optionsWithDefaults.Clock), + logger: newEventLogger(optionsWithDefaults.LRUCacheSize, optionsWithDefaults.Clock), + } +} + +// populateDefaults populates the zero value options with defaults +func populateDefaults(options CorrelatorOptions) CorrelatorOptions { + if options.LRUCacheSize == 0 { + options.LRUCacheSize = maxLruCacheEntries + } + if options.BurstSize == 0 { + options.BurstSize = defaultSpamBurst + } + if options.QPS == 0 { + options.QPS = defaultSpamQPS + } + if options.KeyFunc == nil { + options.KeyFunc = EventAggregatorByReasonFunc + } + if options.MessageFunc == nil { + options.MessageFunc = EventAggregatorByReasonMessageFunc + } + if options.MaxEvents == 0 { + options.MaxEvents = defaultAggregateMaxEvents + } + if options.MaxIntervalInSeconds == 0 { + options.MaxIntervalInSeconds = defaultAggregateIntervalInSeconds + } + if options.Clock == nil { + options.Clock = clock.RealClock{} + } + return options +} + // EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) { if newEvent == nil { diff --git a/vendor/k8s.io/client-go/tools/record/util/util.go b/vendor/k8s.io/client-go/tools/record/util/util.go new file mode 100644 index 00000000..d1818a8d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/util/util.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "net/http" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" +) + +// ValidateEventType checks that eventtype is an expected type of event +func ValidateEventType(eventtype string) bool { + switch eventtype { + case v1.EventTypeNormal, v1.EventTypeWarning: + return true + } + return false +} + +// IsKeyNotFoundError is utility function that checks if an error is not found error +func IsKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/tools/reference/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go index 573d948a..442a991c 100644 --- a/vendor/k8s.io/client-go/tools/reference/ref.go +++ b/vendor/k8s.io/client-go/tools/reference/ref.go @@ -19,8 +19,6 @@ package reference import ( "errors" "fmt" - "net/url" - "strings" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -30,8 +28,7 @@ import ( var ( // Errors that could be returned by GetReference. - ErrNilObject = errors.New("can't reference a nil object") - ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") + ErrNilObject = errors.New("can't reference a nil object") ) // GetReference returns an ObjectReference which refers to the given @@ -47,20 +44,6 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReferen return ref, nil } - gvk := obj.GetObjectKind().GroupVersionKind() - - // if the object referenced is actually persisted, we can just get kind from meta - // if we are building an object reference to something not yet persisted, we should fallback to scheme - kind := gvk.Kind - if len(kind) == 0 { - // TODO: this is wrong - gvks, _, err := scheme.ObjectKinds(obj) - if err != nil { - return nil, err - } - kind = gvks[0].Kind - } - // An object that implements only List has enough metadata to build a reference var listMeta metav1.Common objectMeta, err := meta.Accessor(obj) @@ -73,29 +56,29 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReferen listMeta = objectMeta } - // if the object referenced is actually persisted, we can also get version from meta - version := gvk.GroupVersion().String() - if len(version) == 0 { - selfLink := listMeta.GetSelfLink() - if len(selfLink) == 0 { - return nil, ErrNoSelfLink - } - selfLinkUrl, err := url.Parse(selfLink) + gvk := obj.GetObjectKind().GroupVersionKind() + + // If object meta doesn't contain data about kind and/or version, + // we are falling back to scheme. + // + // TODO: This doesn't work for CRDs, which are not registered in scheme. + if gvk.Empty() { + gvks, _, err := scheme.ObjectKinds(obj) if err != nil { return nil, err } - // example paths: ///* - parts := strings.Split(selfLinkUrl.Path, "/") - if len(parts) < 4 { - return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) - } - if parts[1] == "api" { - version = parts[2] - } else { - version = parts[2] + "/" + parts[3] + if len(gvks) == 0 || gvks[0].Empty() { + return nil, fmt.Errorf("unexpected gvks registered for object %T: %v", obj, gvks) } + // TODO: The same object can be registered for multiple group versions + // (although in practise this doesn't seem to be used). + // In such case, the version set may not be correct. 
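With the selfLink parsing removed, GetReference resolves the group/version/kind from the object's TypeMeta or, when that is empty, from the scheme. A small sketch of the typical call, assuming a typed object registered in the client-go scheme:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	ref "k8s.io/client-go/tools/reference"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	// TypeMeta is empty here, so GetReference falls back to the scheme to
	// resolve the GroupVersionKind; CRD types not registered in the scheme
	// would fail at this point.
	objRef, err := ref.GetReference(scheme.Scheme, pod)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %s/%s (apiVersion %s)\n", objRef.Kind, objRef.Namespace, objRef.Name, objRef.APIVersion)
}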
+ gvk = gvks[0] } + kind := gvk.Kind + version := gvk.GroupVersion().String() + // only has list metadata if objectMeta == nil { return &v1.ObjectReference{ diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS index bf0ba5b9..a5217690 100644 --- a/vendor/k8s.io/client-go/transport/OWNERS +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - smarterclayton - wojtek-t diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 7cffe2a5..980d36ae 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -20,6 +20,7 @@ import ( "fmt" "net" "net/http" + "strings" "sync" "time" @@ -39,13 +40,15 @@ const idleConnsPerHost = 25 var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)} type tlsCacheKey struct { - insecure bool - caData string - certData string - keyData string - getCert string - serverName string - dial string + insecure bool + caData string + certData string + keyData string + getCert string + serverName string + nextProtos string + dial string + disableCompression bool } func (t tlsCacheKey) String() string { @@ -53,7 +56,7 @@ func (t tlsCacheKey) String() string { if len(t.keyData) > 0 { keyText = "" } - return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial) + return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial, t.disableCompression) } func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { @@ -95,6 +98,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { TLSClientConfig: tlsConfig, MaxIdleConnsPerHost: idleConnsPerHost, DialContext: dial, + DisableCompression: config.DisableCompression, }) return c.transports[key], nil } @@ -106,12 +110,14 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) { return tlsCacheKey{}, err } return tlsCacheKey{ - insecure: c.TLS.Insecure, - caData: string(c.TLS.CAData), - certData: string(c.TLS.CertData), - keyData: string(c.TLS.KeyData), - getCert: fmt.Sprintf("%p", c.TLS.GetCert), - serverName: c.TLS.ServerName, - dial: fmt.Sprintf("%p", c.Dial), + insecure: c.TLS.Insecure, + caData: string(c.TLS.CAData), + certData: string(c.TLS.CertData), + keyData: string(c.TLS.KeyData), + getCert: fmt.Sprintf("%p", c.TLS.GetCert), + serverName: c.TLS.ServerName, + nextProtos: strings.Join(c.TLS.NextProtos, ","), + dial: fmt.Sprintf("%p", c.Dial), + disableCompression: c.DisableCompression, }, nil } diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 4081c23e..9e18d11d 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -39,9 +39,18 @@ type Config struct { // Bearer token for authentication BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. 
+ BearerTokenFile string + // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig + // DisableCompression bypasses automatic GZip compression requests to the + // server. + DisableCompression bool + // Transport may be used for custom HTTP behavior. This attribute may // not be specified with the TLS client certificate options. Use // WrapTransport for most client level operations. @@ -52,7 +61,10 @@ type Config struct { // from TLSClientConfig, Transport, or http.DefaultTransport). The // config may layer other RoundTrippers on top of the returned // RoundTripper. - WrapTransport func(rt http.RoundTripper) http.RoundTripper + // + // A future release will change this field to an array. Use config.Wrap() + // instead of setting this value directly. + WrapTransport WrapperFunc // Dial specifies the dial function for creating unencrypted TCP connections. Dial func(ctx context.Context, network, address string) (net.Conn, error) @@ -80,7 +92,7 @@ func (c *Config) HasBasicAuth() bool { // HasTokenAuth returns whether the configuration has token authentication or not. func (c *Config) HasTokenAuth() bool { - return len(c.BearerToken) != 0 + return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0 } // HasCertAuth returns whether the configuration has certificate authentication or not. @@ -93,6 +105,14 @@ func (c *Config) HasCertCallback() bool { return c.TLS.GetCert != nil } +// Wrap adds a transport middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper prior to the +// first API call being made. The provided function is invoked after any +// existing transport wrappers are invoked. +func (c *Config) Wrap(fn WrapperFunc) { + c.WrapTransport = Wrappers(c.WrapTransport, fn) +} + // TLSConfig holds the information needed to set up a TLS transport. type TLSConfig struct { CAFile string // Path of the PEM-encoded server trusted root certificates. @@ -106,5 +126,11 @@ type TLSConfig struct { CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. + // NextProtos is a list of supported application level protocols, in order of preference. + // Used to populate tls.Config.NextProtos. + // To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference). + // To use only http/1.1, set to ["http/1.1"]. + NextProtos []string + GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. 
} diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 0ebcbbc8..a272753a 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -22,7 +22,8 @@ import ( "strings" "time" - "github.com/golang/glog" + "golang.org/x/oauth2" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -44,7 +45,11 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip case config.HasBasicAuth() && config.HasTokenAuth(): return nil, fmt.Errorf("username/password or bearer token may be set, but not both") case config.HasTokenAuth(): - rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + var err error + rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt) + if err != nil { + return nil, err + } case config.HasBasicAuth(): rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) } @@ -62,23 +67,19 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // DebugWrappers wraps a round tripper and logs based on the current log level. func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { - case bool(glog.V(9)): + case bool(klog.V(9)): rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(glog.V(8)): + case bool(klog.V(8)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(glog.V(7)): + case bool(klog.V(7)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(glog.V(6)): + case bool(klog.V(6)): rt = newDebuggingRoundTripper(rt, debugURLTiming) } return rt } -type requestCanceler interface { - CancelRequest(*http.Request) -} - type authProxyRoundTripper struct { username string groups []string @@ -135,11 +136,7 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex } func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -163,11 +160,7 @@ func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -194,11 +187,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -254,24 +243,42 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons } func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := 
rt.delegate.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented by %T", rt.delegate) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } type bearerAuthRoundTripper struct { bearer string + source oauth2.TokenSource rt http.RoundTripper } // NewBearerAuthRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { - return &bearerAuthRoundTripper{bearer, rt} + return &bearerAuthRoundTripper{bearer, nil, rt} +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +// If tokenFile is non-empty, it is periodically read, +// and the last successfully read content is used as the bearer token. +// If tokenFile is non-empty and bearer is empty, the tokenFile is read +// immediately to populate the initial bearer token. +func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { + if len(tokenFile) == 0 { + return &bearerAuthRoundTripper{bearer, nil, rt}, nil + } + source := NewCachedFileTokenSource(tokenFile) + if len(bearer) == 0 { + token, err := source.Token() + if err != nil { + return nil, err + } + bearer = token.AccessToken + } + return &bearerAuthRoundTripper{bearer, source, rt}, nil } func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -280,16 +287,18 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } req = utilnet.CloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + token := rt.bearer + if rt.source != nil { + if refreshedToken, err := rt.source.Token(); err == nil { + token = refreshedToken.AccessToken + } + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return rt.rt.RoundTrip(req) } func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -369,28 +378,57 @@ func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debug } func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { - canceler.CancelRequest(req) + tryCancelRequest(rt.WrappedRoundTripper(), req) +} + +var knownAuthTypes = map[string]bool{ + "bearer": true, + "basic": true, + "negotiate": true, +} + +// maskValue masks credential content from authorization headers +// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +func maskValue(key string, value string) string { + if !strings.EqualFold(key, "Authorization") { + return value + } + if len(value) == 0 { + return "" + } + var authType string + if i := strings.Index(value, " "); i > 0 { + authType = value[0:i] + } else { + authType = value + } + if !knownAuthTypes[strings.ToLower(authType)] { + return "" + } + if len(value) > len(authType)+1 { + value = authType + " " } else { - glog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + value = 
authType } + return value } func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { reqInfo := newRequestInfo(req) if rt.levels[debugJustURL] { - glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) } if rt.levels[debugCurlCommand] { - glog.Infof("%s", reqInfo.toCurl()) + klog.Infof("%s", reqInfo.toCurl()) } if rt.levels[debugRequestHeaders] { - glog.Infof("Request Headers:") + klog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { - glog.Infof(" %s: %s", key, value) + value = maskValue(key, value) + klog.Infof(" %s: %s", key, value) } } } @@ -402,16 +440,16 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo.complete(response, err) if rt.levels[debugURLTiming] { - glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseStatus] { - glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseHeaders] { - glog.Infof("Response Headers:") + klog.Infof("Response Headers:") for key, values := range reqInfo.ResponseHeaders { for _, value := range values { - glog.Infof(" %s: %s", key, value) + klog.Infof(" %s: %s", key, value) } } } diff --git a/vendor/k8s.io/client-go/rest/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go similarity index 70% rename from vendor/k8s.io/client-go/rest/token_source.go rename to vendor/k8s.io/client-go/transport/token_source.go index 296b2a04..bb32c3b4 100644 --- a/vendor/k8s.io/client-go/rest/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rest +package transport import ( "fmt" @@ -24,8 +24,9 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/oauth2" + + "k8s.io/klog" ) // TokenSourceWrapTransport returns a WrapTransport that injects bearer tokens @@ -42,21 +43,32 @@ func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) htt } } -func newCachedPathTokenSource(path string) oauth2.TokenSource { +// NewCachedFileTokenSource returns a oauth2.TokenSource reads a token from a +// file at a specified path and periodically reloads it. +func NewCachedFileTokenSource(path string) oauth2.TokenSource { return &cachingTokenSource{ now: time.Now, - leeway: 1 * time.Minute, + leeway: 10 * time.Second, base: &fileTokenSource{ path: path, - // This period was picked because it is half of the minimum validity - // duration for a token provisioned by they TokenRequest API. This is - // unsophisticated and should induce rotation at a frequency that should - // work with the token volume source. - period: 5 * time.Minute, + // This period was picked because it is half of the duration between when the kubelet + // refreshes a projected service account token and when the original token expires. 
+ // Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime. + // This should induce re-reading at a frequency that works with the token volume source. + period: time.Minute, }, } } +// NewCachedTokenSource returns a oauth2.TokenSource reads a token from a +// designed TokenSource. The ts would provide the source of token. +func NewCachedTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &cachingTokenSource{ + now: time.Now, + base: ts, + } +} + type tokenSourceTransport struct { base http.RoundTripper ort http.RoundTripper @@ -70,6 +82,14 @@ func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, e return tst.ort.RoundTrip(req) } +func (tst *tokenSourceTransport) CancelRequest(req *http.Request) { + if req.Header.Get("Authorization") != "" { + tryCancelRequest(tst.base, req) + return + } + tryCancelRequest(tst.ort, req) +} + type fileTokenSource struct { path string period time.Duration @@ -129,7 +149,7 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) { if ts.tok == nil { return nil, err } - glog.Errorf("Unable to rotate token: %v", err) + klog.Errorf("Unable to rotate token: %v", err) return ts.tok, nil } diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index c19739fd..cd8de982 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -17,11 +17,15 @@ limitations under the License. package transport import ( + "context" "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/klog" ) // New returns an http.RoundTripper that will provide the authentication @@ -52,7 +56,7 @@ func New(config *Config) (http.RoundTripper, error) { // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. func TLSConfigFor(c *Config) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) { + if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0 || len(c.TLS.NextProtos) > 0) { return nil, nil } if c.HasCA() && c.TLS.Insecure { @@ -69,6 +73,7 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { MinVersion: tls.VersionTLS12, InsecureSkipVerify: c.TLS.Insecure, ServerName: c.TLS.ServerName, + NextProtos: c.TLS.NextProtos, } if c.HasCA() { @@ -167,3 +172,74 @@ func rootCertPool(caData []byte) *x509.CertPool { certPool.AppendCertsFromPEM(caData) return certPool } + +// WrapperFunc wraps an http.RoundTripper when a new transport +// is created for a client, allowing per connection behavior +// to be injected. +type WrapperFunc func(rt http.RoundTripper) http.RoundTripper + +// Wrappers accepts any number of wrappers and returns a wrapper +// function that is the equivalent of calling each of them in order. Nil +// values are ignored, which makes this function convenient for incrementally +// wrapping a function. 
+func Wrappers(fns ...WrapperFunc) WrapperFunc { + if len(fns) == 0 { + return nil + } + // optimize the common case of wrapping a possibly nil transport wrapper + // with an additional wrapper + if len(fns) == 2 && fns[0] == nil { + return fns[1] + } + return func(rt http.RoundTripper) http.RoundTripper { + base := rt + for _, fn := range fns { + if fn != nil { + base = fn(base) + } + } + return base + } +} + +// ContextCanceller prevents new requests after the provided context is finished. +// err is returned when the context is closed, allowing the caller to provide a context +// appropriate error. +func ContextCanceller(ctx context.Context, err error) WrapperFunc { + return func(rt http.RoundTripper) http.RoundTripper { + return &contextCanceller{ + ctx: ctx, + rt: rt, + err: err, + } + } +} + +type contextCanceller struct { + ctx context.Context + rt http.RoundTripper + err error +} + +func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) { + select { + case <-b.ctx.Done(): + return nil, b.err + default: + return b.rt.RoundTrip(req) + } +} + +func tryCancelRequest(rt http.RoundTripper, req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + switch rt := rt.(type) { + case canceler: + rt.CancelRequest(req) + case utilnet.RoundTripperWrapper: + tryCancelRequest(rt.WrappedRoundTripper(), req) + default: + klog.Warningf("Unable to cancel request for %T", rt) + } +} diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS new file mode 100644 index 00000000..3cf03643 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index fe2158b2..9fd097af 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -18,30 +18,25 @@ package cert import ( "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" + "crypto" cryptorand "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" - "errors" "fmt" "io/ioutil" - "math" "math/big" "net" "path" "strings" "time" -) -const ( - rsaKeySize = 2048 - duration365d = time.Hour * 24 * 365 + "k8s.io/client-go/util/keyutil" ) +const duration365d = time.Hour * 24 * 365 + // Config contains the basic fields required for creating a certificate type Config struct { CommonName string @@ -58,13 +53,8 @@ type AltNames struct { IPs []net.IP } -// NewPrivateKey creates an RSA private key -func NewPrivateKey() (*rsa.PrivateKey, error) { - return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) -} - // NewSelfSignedCACert creates a CA certificate -func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) { +func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { now := time.Now() tmpl := x509.Certificate{ SerialNumber: new(big.Int).SetInt64(0), @@ -76,7 +66,7 @@ func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) @@ -86,58 +76,6 @@ func 
NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er return x509.ParseCertificate(certDERBytes) } -// NewSignedCert creates a signed certificate using the given CA certificate and key -func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) { - serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) - if err != nil { - return nil, err - } - if len(cfg.CommonName) == 0 { - return nil, errors.New("must specify a CommonName") - } - if len(cfg.Usages) == 0 { - return nil, errors.New("must specify at least one ExtKeyUsage") - } - - certTmpl := x509.Certificate{ - Subject: pkix.Name{ - CommonName: cfg.CommonName, - Organization: cfg.Organization, - }, - DNSNames: cfg.AltNames.DNSNames, - IPAddresses: cfg.AltNames.IPs, - SerialNumber: serial, - NotBefore: caCert.NotBefore, - NotAfter: time.Now().Add(duration365d).UTC(), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: cfg.Usages, - } - certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) - if err != nil { - return nil, err - } - return x509.ParseCertificate(certDERBytes) -} - -// MakeEllipticPrivateKeyPEM creates an ECDSA private key -func MakeEllipticPrivateKeyPEM() ([]byte, error) { - privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) - if err != nil { - return nil, err - } - - derBytes, err := x509.MarshalECPrivateKey(privateKey) - if err != nil { - return nil, err - } - - privateKeyPemBlock := &pem.Block{ - Type: ECPrivateKeyBlockType, - Bytes: derBytes, - } - return pem.EncodeToMemory(privateKeyPemBlock), nil -} - // GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host. // Host may be an IP or a DNS name // You may also specify additional subject alt names (either ip or dns names) for the certificate. @@ -187,7 +125,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey) @@ -243,7 +181,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a // Generate key keyBuffer := bytes.Buffer{} - if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { return nil, nil, err } @@ -259,34 +197,6 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a return certBuffer.Bytes(), keyBuffer.Bytes(), nil } -// FormatBytesCert receives byte array certificate and formats in human-readable format -func FormatBytesCert(cert []byte) (string, error) { - block, _ := pem.Decode(cert) - c, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return "", fmt.Errorf("failed to parse certificate [%v]", err) - } - return FormatCert(c), nil -} - -// FormatCert receives certificate and formats in human-readable format -func FormatCert(c *x509.Certificate) string { - var ips []string - for _, ip := range c.IPAddresses { - ips = append(ips, ip.String()) - } - altNames := append(ips, c.DNSNames...) 
- res := fmt.Sprintf( - "Issuer: CN=%s | Subject: CN=%s | CA: %t\n", - c.Issuer.CommonName, c.Subject.CommonName, c.IsCA, - ) - res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter) - if len(altNames) > 0 { - res += fmt.Sprintf("\nAlternate Names: %v", altNames) - } - return res -} - func ipsToStrings(ips []net.IP) []string { ss := make([]string, 0, len(ips)) for _, ip := range ips { diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go index a57bf09d..35fde68a 100644 --- a/vendor/k8s.io/client-go/util/cert/io.go +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -17,11 +17,7 @@ limitations under the License. package cert import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" "crypto/x509" - "encoding/pem" "fmt" "io/ioutil" "os" @@ -73,64 +69,25 @@ func WriteCert(certPath string, data []byte) error { return ioutil.WriteFile(certPath, data, os.FileMode(0644)) } -// WriteKey writes the pem-encoded key data to keyPath. -// The key file will be created with file mode 0600. -// If the key file already exists, it will be overwritten. -// The parent directory of the keyPath will be created as needed with file mode 0755. -func WriteKey(keyPath string, data []byte) error { - if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { - return err - } - return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) -} - -// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it -// can't find one, it will generate a new key and store it there. -func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { - loadedData, err := ioutil.ReadFile(keyPath) - // Call verifyKeyData to ensure the file wasn't empty/corrupt. - if err == nil && verifyKeyData(loadedData) { - return loadedData, false, err - } - if !os.IsNotExist(err) { - return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) - } - - generatedData, err := MakeEllipticPrivateKeyPEM() +// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func NewPool(filename string) (*x509.CertPool, error) { + pemBlock, err := ioutil.ReadFile(filename) if err != nil { - return nil, false, fmt.Errorf("error generating key: %v", err) - } - if err := WriteKey(keyPath, generatedData); err != nil { - return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + return nil, err } - return generatedData, true, nil -} -// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to -// a PEM encoded block or returns an error. -func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { - switch t := privateKey.(type) { - case *ecdsa.PrivateKey: - derBytes, err := x509.MarshalECPrivateKey(t) - if err != nil { - return nil, err - } - privateKeyPemBlock := &pem.Block{ - Type: ECPrivateKeyBlockType, - Bytes: derBytes, - } - return pem.EncodeToMemory(privateKeyPemBlock), nil - case *rsa.PrivateKey: - return EncodePrivateKeyPEM(t), nil - default: - return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) + pool, err := NewPoolFromBytes(pemBlock) + if err != nil { + return nil, fmt.Errorf("error creating pool from %s: %s", filename, err) } + return pool, nil } -// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. 
+// NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates -func NewPool(filename string) (*x509.CertPool, error) { - certs, err := CertsFromFile(filename) +func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) { + certs, err := ParseCertsPEM(pemBlock) if err != nil { return nil, err } @@ -154,40 +111,3 @@ func CertsFromFile(file string) ([]*x509.Certificate, error) { } return certs, nil } - -// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. -// Returns an error if the file could not be read or if the private key could not be parsed. -func PrivateKeyFromFile(file string) (interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - key, err := ParsePrivateKeyPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading private key file %s: %v", file, err) - } - return key, nil -} - -// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. -// Reads public keys from both public and private key files. -func PublicKeysFromFile(file string) ([]interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - keys, err := ParsePublicKeysPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading public key file %s: %v", file, err) - } - return keys, nil -} - -// verifyKeyData returns true if the provided data appears to be a valid private key. -func verifyKeyData(data []byte) bool { - if len(data) == 0 { - return false - } - _, err := ParsePrivateKeyPEM(data) - return err == nil -} diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go index b99e3665..c7751231 100644 --- a/vendor/k8s.io/client-go/util/cert/pem.go +++ b/vendor/k8s.io/client-go/util/cert/pem.go @@ -17,136 +17,19 @@ limitations under the License. package cert import ( - "crypto/ecdsa" - "crypto/rsa" + "bytes" "crypto/x509" "encoding/pem" "errors" - "fmt" ) const ( - // ECPrivateKeyBlockType is a possible value for pem.Block.Type. - ECPrivateKeyBlockType = "EC PRIVATE KEY" - // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. - RSAPrivateKeyBlockType = "RSA PRIVATE KEY" - // PrivateKeyBlockType is a possible value for pem.Block.Type. - PrivateKeyBlockType = "PRIVATE KEY" - // PublicKeyBlockType is a possible value for pem.Block.Type. - PublicKeyBlockType = "PUBLIC KEY" // CertificateBlockType is a possible value for pem.Block.Type. CertificateBlockType = "CERTIFICATE" // CertificateRequestBlockType is a possible value for pem.Block.Type. 
CertificateRequestBlockType = "CERTIFICATE REQUEST" ) -// EncodePublicKeyPEM returns PEM-encoded public data -func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { - der, err := x509.MarshalPKIXPublicKey(key) - if err != nil { - return []byte{}, err - } - block := pem.Block{ - Type: PublicKeyBlockType, - Bytes: der, - } - return pem.EncodeToMemory(&block), nil -} - -// EncodePrivateKeyPEM returns PEM-encoded private key data -func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte { - block := pem.Block{ - Type: RSAPrivateKeyBlockType, - Bytes: x509.MarshalPKCS1PrivateKey(key), - } - return pem.EncodeToMemory(&block) -} - -// EncodeCertPEM returns PEM-endcoded certificate data -func EncodeCertPEM(cert *x509.Certificate) []byte { - block := pem.Block{ - Type: CertificateBlockType, - Bytes: cert.Raw, - } - return pem.EncodeToMemory(&block) -} - -// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. -// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" -func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { - var privateKeyPemBlock *pem.Block - for { - privateKeyPemBlock, keyData = pem.Decode(keyData) - if privateKeyPemBlock == nil { - break - } - - switch privateKeyPemBlock.Type { - case ECPrivateKeyBlockType: - // ECDSA Private Key in ASN.1 format - if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - case RSAPrivateKeyBlockType: - // RSA Private Key in PKCS#1 format - if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - case PrivateKeyBlockType: - // RSA or ECDSA Private Key in unencrypted PKCS#8 format - if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - } - - // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks - // originally, only the first PEM block was parsed and expected to be a key block - } - - // we read all the PEM blocks and didn't recognize one - return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") -} - -// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. -// Reads public keys from both public and private key files. 
-func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { - var block *pem.Block - keys := []interface{}{} - for { - // read the next block - block, keyData = pem.Decode(keyData) - if block == nil { - break - } - - // test block against parsing functions - if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { - keys = append(keys, publicKey) - continue - } - if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := parseECPublicKey(block.Bytes); err == nil { - keys = append(keys, publicKey) - continue - } - - // tolerate non-key PEM blocks for backwards compatibility - // originally, only the first PEM block was parsed and expected to be a key block - } - - if len(keys) == 0 { - return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") - } - return keys, nil -} - // ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array // Returns an error if a certificate could not be parsed, or if the data does not contain any certificates func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { @@ -178,92 +61,13 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { return certs, nil } -// parseRSAPublicKey parses a single RSA public key from the provided data -func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { - if cert, err := x509.ParseCertificate(data); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - // Test if parsed key is an RSA Public Key - var pubKey *rsa.PublicKey - var ok bool - if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") - } - - return pubKey, nil -} - -// parseRSAPrivateKey parses a single RSA private key from the provided data -func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { - return nil, err - } - } - - // Test if parsed key is an RSA Private Key - var privKey *rsa.PrivateKey - var ok bool - if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") - } - - return privKey, nil -} - -// parseECPublicKey parses a single ECDSA public key from the provided data -func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { - if cert, err := x509.ParseCertificate(data); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err +// EncodeCertificates returns the PEM-encoded byte array that represents by the specified certs. 
+func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) { + b := bytes.Buffer{} + for _, cert := range certs { + if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil { + return []byte{}, err } } - - // Test if parsed key is an ECDSA Public Key - var pubKey *ecdsa.PublicKey - var ok bool - if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") - } - - return pubKey, nil -} - -// parseECPrivateKey parses a single ECDSA private key from the provided data -func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(data); err != nil { - return nil, err - } - - // Test if parsed key is an ECDSA Private Key - var privKey *ecdsa.PrivateKey - var ok bool - if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") - } - - return privKey, nil + return b.Bytes(), nil } diff --git a/vendor/k8s.io/client-go/util/cert/server_inspection.go b/vendor/k8s.io/client-go/util/cert/server_inspection.go new file mode 100644 index 00000000..f1ef292d --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/server_inspection.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/url" + "strings" +) + +// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the +// state of particular servers. apiHost is "host:port" +func GetClientCANames(apiHost string) ([]string, error) { + // when we run this the second time, we know which one we are expecting + acceptableCAs := []string{} + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate + GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) { + acceptableCAs = []string{} + for _, curr := range hello.AcceptableCAs { + acceptableCAs = append(acceptableCAs, string(curr)) + } + return &tls.Certificate{}, nil + }, + } + + conn, err := tls.Dial("tcp", apiHost, tlsConfig) + if err != nil { + return nil, err + } + if err := conn.Close(); err != nil { + return nil, err + } + + return acceptableCAs, nil +} + +// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs +func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) { + apiserverURL, err := url.Parse(kubeConfigURL) + if err != nil { + return nil, err + } + return GetClientCANames(apiserverURL.Host) +} + +// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes. +// The serverName is optional for specifying a different name to get SNI certificates. 
apiHost is "host:port" +func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, // this is insecure so that we always get connected + } + // if a name is specified for SNI, set it. + if len(serverName) > 0 { + tlsConfig.ServerName = serverName + } + + conn, err := tls.Dial("tcp", apiHost, tlsConfig) + if err != nil { + return nil, nil, err + } + if err = conn.Close(); err != nil { + return nil, nil, fmt.Errorf("failed to close connection : %v", err) + } + + peerCerts := conn.ConnectionState().PeerCertificates + peerCertBytes := [][]byte{} + for _, a := range peerCerts { + actualCert, err := EncodeCertificates(a) + if err != nil { + return nil, nil, err + } + peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert)))) + } + + return peerCerts, peerCertBytes, err +} + +// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs +func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) { + apiserverURL, err := url.Parse(kubeConfigURL) + if err != nil { + return nil, nil, err + } + return GetServingCertificates(apiserverURL.Host, serverName) +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 71d442a6..c48ba03e 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/client-go/util/integer" + "k8s.io/utils/integer" ) type backoffEntry struct { @@ -30,7 +30,7 @@ type backoffEntry struct { } type Backoff struct { - sync.Mutex + sync.RWMutex Clock clock.Clock defaultDuration time.Duration maxDuration time.Duration @@ -57,8 +57,8 @@ func NewBackOff(initial, max time.Duration) *Backoff { // Get the current backoff Duration func (p *Backoff) Get(id string) time.Duration { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() var delay time.Duration entry, ok := p.perItemBackoff[id] if ok { @@ -90,8 +90,8 @@ func (p *Backoff) Reset(id string) { // Returns True if the elapsed time since eventTime is smaller than the current backoff window func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() entry, ok := p.perItemBackoff[id] if !ok { return false @@ -99,13 +99,13 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { return false } - return p.Clock.Now().Sub(eventTime) < entry.backoff + return p.Clock.Since(eventTime) < entry.backoff } // Returns True if time since lastupdate is less than the current backoff window. func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() entry, ok := p.perItemBackoff[id] if !ok { return false diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go index e671c044..ffd912c5 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -17,6 +17,8 @@ limitations under the License. 
package flowcontrol import ( + "context" + "errors" "sync" "time" @@ -33,6 +35,8 @@ type RateLimiter interface { Stop() // QPS returns QPS of this rate limiter QPS() float32 + // Wait returns nil if a token is taken before the Context is done. + Wait(ctx context.Context) error } type tokenBucketRateLimiter struct { @@ -98,6 +102,10 @@ func (t *tokenBucketRateLimiter) QPS() float32 { return t.qps } +func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error { + return t.limiter.Wait(ctx) +} + type fakeAlwaysRateLimiter struct{} func NewFakeAlwaysRateLimiter() RateLimiter { @@ -116,6 +124,10 @@ func (t *fakeAlwaysRateLimiter) QPS() float32 { return 1 } +func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error { + return nil +} + type fakeNeverRateLimiter struct { wg sync.WaitGroup } @@ -141,3 +153,7 @@ func (t *fakeNeverRateLimiter) Accept() { func (t *fakeNeverRateLimiter) QPS() float32 { return 1 } + +func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error { + return errors.New("can not be accept") +} diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS new file mode 100644 index 00000000..470b7a1c --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go new file mode 100644 index 00000000..83c2c625 --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/key.go @@ -0,0 +1,323 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package keyutil contains utilities for managing public/private key pairs. +package keyutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +const ( + // ECPrivateKeyBlockType is a possible value for pem.Block.Type. + ECPrivateKeyBlockType = "EC PRIVATE KEY" + // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. + RSAPrivateKeyBlockType = "RSA PRIVATE KEY" + // PrivateKeyBlockType is a possible value for pem.Block.Type. + PrivateKeyBlockType = "PRIVATE KEY" + // PublicKeyBlockType is a possible value for pem.Block.Type. + PublicKeyBlockType = "PUBLIC KEY" +) + +// MakeEllipticPrivateKeyPEM creates an ECDSA private key +func MakeEllipticPrivateKeyPEM() ([]byte, error) { + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + return nil, err + } + + derBytes, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, err + } + + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil +} + +// WriteKey writes the pem-encoded key data to keyPath. +// The key file will be created with file mode 0600. 
+// If the key file already exists, it will be overwritten. +// The parent directory of the keyPath will be created as needed with file mode 0755. +func WriteKey(keyPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) +} + +// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it +// can't find one, it will generate a new key and store it there. +func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { + loadedData, err := ioutil.ReadFile(keyPath) + // Call verifyKeyData to ensure the file wasn't empty/corrupt. + if err == nil && verifyKeyData(loadedData) { + return loadedData, false, err + } + if !os.IsNotExist(err) { + return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) + } + + generatedData, err := MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, false, fmt.Errorf("error generating key: %v", err) + } + if err := WriteKey(keyPath, generatedData); err != nil { + return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + } + return generatedData, true, nil +} + +// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to +// a PEM encoded block or returns an error. +func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { + switch t := privateKey.(type) { + case *ecdsa.PrivateKey: + derBytes, err := x509.MarshalECPrivateKey(t) + if err != nil { + return nil, err + } + block := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(block), nil + case *rsa.PrivateKey: + block := &pem.Block{ + Type: RSAPrivateKeyBlockType, + Bytes: x509.MarshalPKCS1PrivateKey(t), + } + return pem.EncodeToMemory(block), nil + default: + return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) + } +} + +// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. +// Returns an error if the file could not be read or if the private key could not be parsed. +func PrivateKeyFromFile(file string) (interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + key, err := ParsePrivateKeyPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading private key file %s: %v", file, err) + } + return key, nil +} + +// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. +// Reads public keys from both public and private key files. +func PublicKeysFromFile(file string) ([]interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + keys, err := ParsePublicKeysPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading public key file %s: %v", file, err) + } + return keys, nil +} + +// verifyKeyData returns true if the provided data appears to be a valid private key. +func verifyKeyData(data []byte) bool { + if len(data) == 0 { + return false + } + _, err := ParsePrivateKeyPEM(data) + return err == nil +} + +// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. 
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" +func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { + var privateKeyPemBlock *pem.Block + for { + privateKeyPemBlock, keyData = pem.Decode(keyData) + if privateKeyPemBlock == nil { + break + } + + switch privateKeyPemBlock.Type { + case ECPrivateKeyBlockType: + // ECDSA Private Key in ASN.1 format + if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case RSAPrivateKeyBlockType: + // RSA Private Key in PKCS#1 format + if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case PrivateKeyBlockType: + // RSA or ECDSA Private Key in unencrypted PKCS#8 format + if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + } + + // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks + // originally, only the first PEM block was parsed and expected to be a key block + } + + // we read all the PEM blocks and didn't recognize one + return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") +} + +// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. +// Reads public keys from both public and private key files. +func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { + var block *pem.Block + keys := []interface{}{} + for { + // read the next block + block, keyData = pem.Decode(keyData) + if block == nil { + break + } + + // test block against parsing functions + if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseECPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + + // tolerate non-key PEM blocks for backwards compatibility + // originally, only the first PEM block was parsed and expected to be a key block + } + + if len(keys) == 0 { + return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") + } + return keys, nil +} + +// parseRSAPublicKey parses a single RSA public key from the provided data +func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an RSA Public Key + var pubKey *rsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") + } + + return pubKey, nil +} + +// parseRSAPrivateKey parses a single RSA private key from the provided data +func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { + return nil, err + } + } + + // Test if parsed key is an RSA Private Key + var privKey *rsa.PrivateKey + var ok bool + if privKey, ok = 
parsedKey.(*rsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") + } + + return privKey, nil +} + +// parseECPublicKey parses a single ECDSA public key from the provided data +func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an ECDSA Public Key + var pubKey *ecdsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") + } + + return pubKey, nil +} + +// parseECPrivateKey parses a single ECDSA private key from the provided data +func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(data); err != nil { + return nil, err + } + + // Test if parsed key is an ECDSA Private Key + var privKey *ecdsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") + } + + return privKey, nil +} diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS index a4c1c2d4..dec3e88d 100644 --- a/vendor/k8s.io/client-go/util/retry/OWNERS +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -1,2 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - caesarxuchao diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go index 3ac0840a..15e2722f 100644 --- a/vendor/k8s.io/client-go/util/retry/util.go +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -42,38 +42,64 @@ var DefaultBackoff = wait.Backoff{ Jitter: 0.1, } -// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting -// write. Callers should preserve previous executions if they wish to retry changes. It performs an -// exponential backoff. -// -// var pod *api.Pod -// err := RetryOnConflict(DefaultBackoff, func() (err error) { -// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) -// return -// }) -// if err != nil { -// // may be conflict if max retries were hit -// return err -// } -// ... -// -// TODO: Make Backoff an interface? -func RetryOnConflict(backoff wait.Backoff, fn func() error) error { - var lastConflictErr error +// OnError allows the caller to retry fn in case the error returned by fn is retriable +// according to the provided function. backoff defines the maximum retries and the wait +// interval between two retries. +func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error { + var lastErr error err := wait.ExponentialBackoff(backoff, func() (bool, error) { err := fn() switch { case err == nil: return true, nil - case errors.IsConflict(err): - lastConflictErr = err + case retriable(err): + lastErr = err return false, nil default: return false, err } }) if err == wait.ErrWaitTimeout { - err = lastConflictErr + err = lastErr } return err } + +// RetryOnConflict is used to make an update to a resource when you have to worry about +// conflicts caused by other code making unrelated updates to the resource at the same +// time. 
fn should fetch the resource to be modified, make appropriate changes to it, try +// to update it, and return (unmodified) the error from the update function. On a +// successful update, RetryOnConflict will return nil. If the update function returns a +// "Conflict" error, RetryOnConflict will wait some amount of time as described by +// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times +// and gives up, RetryOnConflict will return an error to the caller. +// +// err := retry.RetryOnConflict(retry.DefaultRetry, func() error { +// // Fetch the resource here; you need to refetch it on every try, since +// // if you got a conflict on the last update attempt then you need to get +// // the current version before making your own changes. +// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{}) +// if err != nil { +// return err +// } +// +// // Make whatever updates to the resource are needed +// pod.Status.Phase = v1.PodFailed +// +// // Try to update +// _, err = c.Pods("mynamespace").UpdateStatus(pod) +// // You have to return err itself here (not wrapped inside another error) +// // so that RetryOnConflict can identify it correctly. +// return err +// }) +// if err != nil { +// // May be conflict if max retries were hit, or may be something unrelated +// // like permissions or a network error +// return err +// } +// ... +// +// TODO: Make Backoff an interface? +func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + return OnError(backoff, errors.IsConflict, fn) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index a5bed29e..71bb6322 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -35,7 +35,7 @@ type RateLimiter interface { } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has -both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential +both overall and per-item rate limiting. 
The overall is a token bucket and the per-item is exponential func DefaultControllerRateLimiter() RateLimiter { return NewMaxOfRateLimiter( NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), @@ -62,7 +62,7 @@ func (r *BucketRateLimiter) NumRequeues(item interface{}) int { func (r *BucketRateLimiter) Forget(item interface{}) { } -// ItemExponentialFailureRateLimiter does a simple baseDelay*10^ limit +// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit // dealing with max failures and expiration are up to the caller type ItemExponentialFailureRateLimiter struct { failuresLock sync.Mutex diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index a3717742..e1ab76ea 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -18,6 +18,7 @@ package workqueue import ( "container/heap" + "sync" "time" "k8s.io/apimachinery/pkg/util/clock" @@ -34,14 +35,17 @@ type DelayingInterface interface { // NewDelayingQueue constructs a new workqueue with delayed queuing ability func NewDelayingQueue() DelayingInterface { - return newDelayingQueue(clock.RealClock{}, "") + return NewDelayingQueueWithCustomClock(clock.RealClock{}, "") } +// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability func NewNamedDelayingQueue(name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, name) + return NewDelayingQueueWithCustomClock(clock.RealClock{}, name) } -func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { +// NewDelayingQueueWithCustomClock constructs a new named workqueue +// with ability to inject real or fake clock for testing purposes +func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface { ret := &delayingType{ Interface: NewNamed(name), clock: clock, @@ -65,6 +69,8 @@ type delayingType struct { // stopCh lets us signal a shutdown to the waiting loop stopCh chan struct{} + // stopOnce guarantees we only signal shutdown a single time + stopOnce sync.Once // heartbeat ensures we wait no more than maxWait before firing heartbeat clock.Ticker @@ -131,11 +137,14 @@ func (pq waitForPriorityQueue) Peek() interface{} { return pq[0] } -// ShutDown gives a way to shut off this queue +// ShutDown stops the queue. After the queue drains, the returned shutdown bool +// on Get() will be true. This method may be invoked more than once. 
func (q *delayingType) ShutDown() { - q.Interface.ShutDown() - close(q.stopCh) - q.heartbeat.Stop() + q.stopOnce.Do(func() { + q.Interface.ShutDown() + close(q.stopCh) + q.heartbeat.Stop() + }) } // AddAfter adds the given item to the work queue after the given delay @@ -172,6 +181,9 @@ func (q *delayingType) waitingLoop() { // Make a placeholder channel to use when there are no items in our list never := make(<-chan time.Time) + // Make a timer that expires when the item at the head of the waiting queue is ready + var nextReadyAtTimer clock.Timer + waitingForQueue := &waitForPriorityQueue{} heap.Init(waitingForQueue) @@ -199,8 +211,12 @@ func (q *delayingType) waitingLoop() { // Set up a wait for the first item's readyAt (if one exists) nextReadyAt := never if waitingForQueue.Len() > 0 { + if nextReadyAtTimer != nil { + nextReadyAtTimer.Stop() + } entry := waitingForQueue.Peek().(*waitFor) - nextReadyAt = q.clock.After(entry.readyAt.Sub(now)) + nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now)) + nextReadyAt = nextReadyAtTimer.C() } select { diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go index 2a00c74a..a5c976e0 100644 --- a/vendor/k8s.io/client-go/util/workqueue/doc.go +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -23,4 +23,4 @@ limitations under the License. // * Multiple consumers and producers. In particular, it is allowed for an // item to be reenqueued while it is being processed. // * Shutdown notifications. -package workqueue +package workqueue // import "k8s.io/client-go/util/workqueue" diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index a481bdfb..a3911bf2 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -19,6 +19,8 @@ package workqueue import ( "sync" "time" + + "k8s.io/apimachinery/pkg/util/clock" ) // This file provides abstractions for setting the provider (e.g., prometheus) @@ -28,6 +30,7 @@ type queueMetrics interface { add(item t) get(item t) done(item t) + updateUnfinishedWork() } // GaugeMetric represents a single numerical value that can arbitrarily go up @@ -37,6 +40,12 @@ type GaugeMetric interface { Dec() } +// SettableGaugeMetric represents a single numerical value that can arbitrarily go up +// and down. (Separate from GaugeMetric to preserve backwards compatibility.) +type SettableGaugeMetric interface { + Set(float64) +} + // CounterMetric represents a single numerical value that only ever // goes up. type CounterMetric interface { @@ -48,23 +57,36 @@ type SummaryMetric interface { Observe(float64) } +// HistogramMetric counts individual observations. +type HistogramMetric interface { + Observe(float64) +} + type noopMetric struct{} func (noopMetric) Inc() {} func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} func (noopMetric) Observe(float64) {} +// defaultQueueMetrics expects the caller to lock before setting any metrics. type defaultQueueMetrics struct { + clock clock.Clock + // current depth of a workqueue depth GaugeMetric // total number of adds handled by a workqueue adds CounterMetric // how long an item stays in a workqueue - latency SummaryMetric + latency HistogramMetric // how long processing an item from a workqueue takes - workDuration SummaryMetric + workDuration HistogramMetric addTimes map[t]time.Time processingStartTimes map[t]time.Time + + // how long have current threads been working? 
+ unfinishedWorkSeconds SettableGaugeMetric + longestRunningProcessor SettableGaugeMetric } func (m *defaultQueueMetrics) add(item t) { @@ -75,7 +97,7 @@ func (m *defaultQueueMetrics) add(item t) { m.adds.Inc() m.depth.Inc() if _, exists := m.addTimes[item]; !exists { - m.addTimes[item] = time.Now() + m.addTimes[item] = m.clock.Now() } } @@ -85,9 +107,9 @@ func (m *defaultQueueMetrics) get(item t) { } m.depth.Dec() - m.processingStartTimes[item] = time.Now() + m.processingStartTimes[item] = m.clock.Now() if startTime, exists := m.addTimes[item]; exists { - m.latency.Observe(sinceInMicroseconds(startTime)) + m.latency.Observe(m.sinceInSeconds(startTime)) delete(m.addTimes, item) } } @@ -98,14 +120,44 @@ func (m *defaultQueueMetrics) done(item t) { } if startTime, exists := m.processingStartTimes[item]; exists { - m.workDuration.Observe(sinceInMicroseconds(startTime)) + m.workDuration.Observe(m.sinceInSeconds(startTime)) delete(m.processingStartTimes, item) } } +func (m *defaultQueueMetrics) updateUnfinishedWork() { + // Note that a summary metric would be better for this, but prometheus + // doesn't seem to have non-hacky ways to reset the summary metrics. + var total float64 + var oldest float64 + for _, t := range m.processingStartTimes { + age := m.sinceInMicroseconds(t) + total += age + if age > oldest { + oldest = age + } + } + // Convert to seconds; microseconds is unhelpfully granular for this. + total /= 1000000 + m.unfinishedWorkSeconds.Set(total) + m.longestRunningProcessor.Set(oldest / 1000000) +} + +type noMetrics struct{} + +func (noMetrics) add(item t) {} +func (noMetrics) get(item t) {} +func (noMetrics) done(item t) {} +func (noMetrics) updateUnfinishedWork() {} + // Gets the time since the specified start in microseconds. -func sinceInMicroseconds(start time.Time) float64 { - return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +func (m *defaultQueueMetrics) sinceInMicroseconds(start time.Time) float64 { + return float64(m.clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} + +// Gets the time since the specified start in seconds. 
+func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 { + return m.clock.Since(start).Seconds() } type retryMetrics interface { @@ -128,8 +180,10 @@ func (m *defaultRetryMetrics) retry() { type MetricsProvider interface { NewDepthMetric(name string) GaugeMetric NewAddsMetric(name string) CounterMetric - NewLatencyMetric(name string) SummaryMetric - NewWorkDurationMetric(name string) SummaryMetric + NewLatencyMetric(name string) HistogramMetric + NewWorkDurationMetric(name string) HistogramMetric + NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric + NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric NewRetriesMetric(name string) CounterMetric } @@ -143,11 +197,19 @@ func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric { return noopMetric{} } -func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric { +func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { return noopMetric{} } -func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric { +func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric { return noopMetric{} } @@ -155,25 +217,37 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } -var metricsFactory = struct { - metricsProvider MetricsProvider - setProviders sync.Once -}{ +var globalMetricsFactory = queueMetricsFactory{ metricsProvider: noopMetricsProvider{}, } -func newQueueMetrics(name string) queueMetrics { - var ret *defaultQueueMetrics - if len(name) == 0 { - return ret +type queueMetricsFactory struct { + metricsProvider MetricsProvider + + onlyOnce sync.Once +} + +func (f *queueMetricsFactory) setProvider(mp MetricsProvider) { + f.onlyOnce.Do(func() { + f.metricsProvider = mp + }) +} + +func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics { + mp := f.metricsProvider + if len(name) == 0 || mp == (noopMetricsProvider{}) { + return noMetrics{} } return &defaultQueueMetrics{ - depth: metricsFactory.metricsProvider.NewDepthMetric(name), - adds: metricsFactory.metricsProvider.NewAddsMetric(name), - latency: metricsFactory.metricsProvider.NewLatencyMetric(name), - workDuration: metricsFactory.metricsProvider.NewWorkDurationMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, + clock: clock, + depth: mp.NewDepthMetric(name), + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, } } @@ -183,13 +257,12 @@ func newRetryMetrics(name string) retryMetrics { return ret } return &defaultRetryMetrics{ - retries: metricsFactory.metricsProvider.NewRetriesMetric(name), + retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name), } } -// SetProvider sets the metrics provider of the metricsFactory. +// SetProvider sets the metrics provider for all subsequently created work +// queues. Only the first call has an effect. 
func SetProvider(metricsProvider MetricsProvider) { - metricsFactory.setProviders.Do(func() { - metricsFactory.metricsProvider = metricsProvider - }) + globalMetricsFactory.setProvider(metricsProvider) } diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go index 526bd244..5928a0c5 100644 --- a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go +++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -25,12 +25,6 @@ import ( type DoWorkPieceFunc func(piece int) -// Parallelize is a very simple framework that allows for parallelizing -// N independent pieces of work. -func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { - ParallelizeUntil(nil, workers, pieces, doWorkPiece) -} - // ParallelizeUntil is a framework that allows for parallelizing N // independent pieces of work until done or the context is canceled. func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) { diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index dc9a7cc7..39009b8e 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -18,6 +18,9 @@ package workqueue import ( "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" ) type Interface interface { @@ -35,14 +38,29 @@ func New() *Type { } func NewNamed(name string) *Type { - return &Type{ - dirty: set{}, - processing: set{}, - cond: sync.NewCond(&sync.Mutex{}), - metrics: newQueueMetrics(name), + rc := clock.RealClock{} + return newQueue( + rc, + globalMetricsFactory.newQueueMetrics(name, rc), + defaultUnfinishedWorkUpdatePeriod, + ) +} + +func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type { + t := &Type{ + clock: c, + dirty: set{}, + processing: set{}, + cond: sync.NewCond(&sync.Mutex{}), + metrics: metrics, + unfinishedWorkUpdatePeriod: updatePeriod, } + go t.updateUnfinishedWorkLoop() + return t } +const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond + // Type is a work queue (see the package comment). type Type struct { // queue defines the order in which we will work on items. 
Every @@ -64,6 +82,9 @@ type Type struct { shuttingDown bool metrics queueMetrics + + unfinishedWorkUpdatePeriod time.Duration + clock clock.Clock } type empty struct{} @@ -170,3 +191,22 @@ func (q *Type) ShuttingDown() bool { return q.shuttingDown } + +func (q *Type) updateUnfinishedWorkLoop() { + t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) + defer t.Stop() + for range t.C() { + if !func() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if !q.shuttingDown { + q.metrics.updateUnfinishedWork() + return true + } + return false + + }() { + return + } + } +} diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go similarity index 96% rename from vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go rename to vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 417ac001..8321876a 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -20,10 +20,10 @@ package workqueue type RateLimitingInterface interface { DelayingInterface - // AddRateLimited adds an item to the workqueue after the rate limiter says its ok + // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok AddRateLimited(item interface{}) - // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. Forget(item interface{}) @@ -55,7 +55,7 @@ type rateLimitingType struct { rateLimiter RateLimiter } -// AddRateLimited AddAfter's the item based on the time when the rate limiter says its ok +// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok func (q *rateLimitingType) AddRateLimited(item interface{}) { q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml new file mode 100644 index 00000000..5677664c --- /dev/null +++ b/vendor/k8s.io/klog/.travis.yml @@ -0,0 +1,16 @@ +language: go +go_import_path: k8s.io/klog +dist: xenial +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(golint $(go list -e ./...)) + - go tool vet . || go vet . + - go test -v -race ./... +install: + - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md new file mode 100644 index 00000000..574a56ab --- /dev/null +++ b/vendor/k8s.io/klog/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/k8s.io/klog/LICENSE similarity index 100% rename from vendor/github.com/golang/glog/LICENSE rename to vendor/k8s.io/klog/LICENSE diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS new file mode 100644 index 00000000..380e514f --- /dev/null +++ b/vendor/k8s.io/klog/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber +approvers: + - dims + - thockin + - justinsb + - tallclair + - piosz + - brancz + - DirectXMan12 + - lavalamp diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md new file mode 100644 index 00000000..841468b4 --- /dev/null +++ b/vendor/k8s.io/klog/README.md @@ -0,0 +1,97 @@ +klog +==== + +klog is a permanent fork of https://github.com/golang/glog. + +## Why was klog created? + +The decision to create klog was one that wasn't made lightly, but it was necessary due to some +drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: + +> The code in this repo [...] is not itself under development + +This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: + + * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. + * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it + * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. 
+ +Historical context is available here: + + * https://github.com/kubernetes/kubernetes/issues/61006 + * https://github.com/kubernetes/kubernetes/issues/70264 + * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ + * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ + +---- + +How to use klog +=============== +- Replace imports for `github.com/golang/glog` with `k8s.io/klog` +- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags +- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) +- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) + +### Coexisting with glog +This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +---- + +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. + + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. 
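The klog README vendored above describes the migration steps: swap the `github.com/golang/glog` import for `k8s.io/klog`, register the flags explicitly with `klog.InitFlags`, and optionally redirect output with `klog.SetOutput`. The following is a minimal sketch of those steps (not part of the vendored sources); the `bytes.Buffer` destination is only a stand-in for whatever `io.Writer` (syslog, a test buffer) a caller would actually use.

```go
package main

import (
	"bytes"
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog no longer registers its flags in init(); register them explicitly
	// (nil means flag.CommandLine) before calling flag.Parse.
	klog.InitFlags(nil)
	// Turn off the stderr default so SetOutput below actually takes effect.
	_ = flag.Set("logtostderr", "false")
	flag.Parse()

	// Call sites look exactly like glog.
	klog.Info("Prepare to repel boarders")
	if klog.V(2) {
		klog.Info("Starting transaction...")
	}

	// Redirect everything klog writes from here on to an io.Writer of our choice.
	var buf bytes.Buffer
	klog.SetOutput(&buf)
	klog.Warning("this line ends up in buf rather than in log files")

	// Flush buffered log output before exiting.
	klog.Flush()
}
```

Because the flags are registered on `flag.CommandLine`, the same binary still honors `-v`, `-vmodule`, `-log_file`, and the other options added in the klog.go changes further down in this patch.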
diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md new file mode 100644 index 00000000..b53eb960 --- /dev/null +++ b/vendor/k8s.io/klog/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `klog` is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS new file mode 100644 index 00000000..6128a586 --- /dev/null +++ b/vendor/k8s.io/klog/SECURITY_CONTACTS @@ -0,0 +1,20 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +dims +thockin +justinsb +tallclair +piosz +brancz +DirectXMan12 +lavalamp diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md new file mode 100644 index 00000000..0d15c00c --- /dev/null +++ b/vendor/k8s.io/klog/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod new file mode 100644 index 00000000..3877d854 --- /dev/null +++ b/vendor/k8s.io/klog/go.mod @@ -0,0 +1,5 @@ +module k8s.io/klog + +go 1.12 + +require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum new file mode 100644 index 00000000..fb64d277 --- /dev/null +++ b/vendor/k8s.io/klog/go.sum @@ -0,0 +1,2 @@ +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/k8s.io/klog/klog.go similarity index 83% rename from vendor/github.com/golang/glog/glog.go rename to vendor/k8s.io/klog/klog.go index 54bd7afd..2712ce0a 100644 --- a/vendor/github.com/golang/glog/glog.go +++ b/vendor/k8s.io/klog/klog.go @@ -14,32 +14,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. // It provides functions Info, Warning, Error, Fatal, plus formatting variants such as // Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
// // Basic examples: // -// glog.Info("Prepare to repel boarders") +// klog.Info("Prepare to repel boarders") // -// glog.Fatalf("Initialization failed: %s", err) +// klog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // -// if glog.V(2) { -// glog.Info("Starting transaction...") +// if klog.V(2) { +// klog.Info("Starting transaction...") // } // -// glog.V(2).Infoln("Processed", nItems, "elements") +// klog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. // -// By default, all log statements write to files in a temporary directory. +// By default, all log statements write to standard error. // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=false +// -logtostderr=true // Logs are written to standard error instead of to files. // -alsologtostderr=false // Logs are written to standard error as well as to files. @@ -68,7 +68,7 @@ // -vmodule=gopher*=3 // sets the V level to 3 in all Go files whose names begin "gopher". // -package glog +package klog import ( "bufio" @@ -78,6 +78,7 @@ import ( "fmt" "io" stdLog "log" + "math" "os" "path/filepath" "runtime" @@ -141,7 +142,7 @@ func (s *severity) Set(value string) error { if v, ok := severityByName(value); ok { threshold = v } else { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -225,7 +226,7 @@ func (l *Level) Get() interface{} { // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -293,7 +294,7 @@ func (m *moduleSpec) Set(value string) error { return errVmoduleSyntax } pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) + v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } @@ -395,21 +396,43 @@ type flushSyncWriter interface { io.Writer } +// init sets up the defaults and runs flushDaemon. func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false go logging.flushDaemon() } +// InitFlags is for explicitly initializing the flags. 
+func InitFlags(flagset *flag.FlagSet) { + if flagset == nil { + flagset = flag.CommandLine + } + + flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") + flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") + flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, + "Defines the maximum size a log file can grow to. Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") + flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") + flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") + flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") + flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") +} + // Flush flushes all pending log I/O. func Flush() { logging.lockAndFlushAll() @@ -453,6 +476,27 @@ type loggingT struct { // safely using atomic.LoadInt32. vmodule moduleSpec // The state of the -vmodule flag. verbosity Level // V logging level, the value of the -v flag/ + + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. + logDir string + + // If non-empty, specifies the path of the file to write logs. mutually exclusive + // with the log-dir option. + logFile string + + // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the + // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. + logFileMaxSizeMB uint64 + + // If true, do not add the prefix headers, useful when used with SetOutput + skipHeaders bool + + // If true, do not add the headers to log files + skipLogHeaders bool + + // If true, add the file directory to the header + addDirHeader bool } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -538,9 +582,14 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { file = "???" line = 1 } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } } } return l.formatHeader(s, file, line), file, line @@ -556,6 +605,9 @@ func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { s = infoLog // for safety. } buf := l.getBuffer() + if l.skipHeaders { + return buf + } // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. 
@@ -667,6 +719,49 @@ func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToSt l.output(s, buf, file, line, alsoToStderr) } +// redirectBuffer is used to set an alternate destination for the logs +type redirectBuffer struct { + w io.Writer +} + +func (rb *redirectBuffer) Sync() error { + return nil +} + +func (rb *redirectBuffer) Flush() error { + return nil +} + +func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { + return rb.w.Write(bytes) +} + +// SetOutput sets the output destination for all severities +func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() + for s := fatalLog; s >= infoLog; s-- { + rb := &redirectBuffer{ + w: w, + } + logging.file[s] = rb + } +} + +// SetOutputBySeverity sets the output destination for specific severity +func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) + } + rb := &redirectBuffer{ + w: w, + } + logging.file[sev] = rb +} + // output writes the data to the log files and releases the buffer. func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { l.mu.Lock() @@ -676,33 +771,44 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo } } data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { + if l.toStderr { os.Stderr.Write(data) } else { if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } if s == fatalLog { @@ -741,7 +847,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds +// by Flush may deadlock when klog.Fatal is called from a hook that holds // a lock. 
func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) @@ -752,7 +858,7 @@ func timeoutFlush(timeout time.Duration) { select { case <-done: case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) } } @@ -802,18 +908,33 @@ func (l *loggingT) exit(err error) { type syncBuffer struct { logger *loggingT *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file + maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } func (sb *syncBuffer) Sync() error { return sb.file.Sync() } +// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. +func CalculateMaxSize() uint64 { + if logging.logFile != "" { + if logging.logFileMaxSizeMB == 0 { + // If logFileMaxSizeMB is zero, we don't have limitations on the log size. + return math.MaxUint64 + } + // Flag logFileMaxSizeMB is in MB for user convenience. + return logging.logFileMaxSizeMB * 1024 * 1024 + } + // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. + return MaxSize +} + func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { + if sb.nbytes+uint64(len(p)) >= sb.maxbytes { + if err := sb.rotateFile(time.Now(), false); err != nil { sb.logger.exit(err) } } @@ -826,13 +947,15 @@ func (sb *syncBuffer) Write(p []byte) (n int, err error) { } // rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { if sb.file != nil { sb.Flush() sb.file.Close() } var err error - sb.file, _, err = create(severityName[sb.sev], now) + sb.file, _, err = create(severityName[sb.sev], now, startup) sb.nbytes = 0 if err != nil { return err @@ -840,6 +963,10 @@ func (sb *syncBuffer) rotateFile(now time.Time) error { sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + if sb.logger.skipLogHeaders { + return nil + } + // Write header. var buf bytes.Buffer fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) @@ -864,10 +991,11 @@ func (l *loggingT) createFiles(sev severity) error { // has already been created, we can stop. for s := sev; s >= infoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ - logger: l, - sev: s, + logger: l, + sev: s, + maxbytes: CalculateMaxSize(), } - if err := sb.rotateFile(now); err != nil { + if err := sb.rotateFile(now, true); err != nil { return err } l.file[s] = sb @@ -875,11 +1003,11 @@ func (l *loggingT) createFiles(sev severity) error { return nil } -const flushInterval = 30 * time.Second +const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { + for range time.NewTicker(flushInterval).C { l.lockAndFlushAll() } } @@ -986,9 +1114,9 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. 
// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } +// if klog.V(2) { klog.Info("log this") } // or -// glog.V(2).Info("log this") +// klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1062,7 +1190,7 @@ func InfoDepth(depth int, args ...interface{}) { } // Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { logging.println(infoLog, args...) } @@ -1086,7 +1214,7 @@ func WarningDepth(depth int, args ...interface{}) { } // Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { logging.println(warningLog, args...) } @@ -1110,7 +1238,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { logging.println(errorLog, args...) } @@ -1136,7 +1264,7 @@ func FatalDepth(depth int, args ...interface{}) { // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { logging.println(fatalLog, args...) } diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/k8s.io/klog/klog_file.go similarity index 75% rename from vendor/github.com/golang/glog/glog_file.go rename to vendor/k8s.io/klog/klog_file.go index 65075d28..e4010ad4 100644 --- a/vendor/github.com/golang/glog/glog_file.go +++ b/vendor/k8s.io/klog/klog_file.go @@ -16,11 +16,10 @@ // File I/O for logs. -package glog +package klog import ( "errors" - "flag" "fmt" "os" "os/user" @@ -36,13 +35,9 @@ var MaxSize uint64 = 1024 * 1024 * 1800 // logDirs lists the candidate directories for new log files. var logDirs []string -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) + if logging.logDir != "" { + logDirs = append(logDirs, logging.logDir) } logDirs = append(logDirs, os.TempDir()) } @@ -102,7 +97,16 @@ var onceLogDirs sync.Once // contains tag ("INFO", "FATAL", etc.) and t. If the file is created // successfully, create also attempts to update the symlink for that tag, ignoring // errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. 
+func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { + if logging.logFile != "" { + f, err := openOrCreate(logging.logFile, startup) + if err == nil { + return f, logging.logFile, nil + } + return nil, "", fmt.Errorf("log: unable to create log: %v", err) + } onceLogDirs.Do(createLogDirs) if len(logDirs) == 0 { return nil, "", errors.New("log: no log dirs") @@ -111,7 +115,7 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { var lastErr error for _, dir := range logDirs { fname := filepath.Join(dir, name) - f, err := os.Create(fname) + f, err := openOrCreate(fname, startup) if err == nil { symlink := filepath.Join(dir, link) os.Remove(symlink) // ignore err @@ -122,3 +126,14 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } + +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func openOrCreate(name string, startup bool) (*os.File, error) { + if startup { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + return f, err + } + f, err := os.Create(name) + return f, err +} diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/k8s.io/kubernetes/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/k8s.io/kubernetes/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD deleted file mode 100644 index 6ca92930..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD +++ /dev/null @@ -1,60 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "annotation_key_constants.go", - "doc.go", - "field_constants.go", - "json.go", - "objectreference.go", - "register.go", - "resource.go", - "taint.go", - "toleration.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importpath = "k8s.io/kubernetes/pkg/apis/core", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "taint_test.go", - "toleration_test.go", - ], - embed = [":go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/apis/core/fuzzer:all-srcs", - "//pkg/apis/core/helper:all-srcs", - "//pkg/apis/core/install:all-srcs", - "//pkg/apis/core/pods:all-srcs", - "//pkg/apis/core/v1:all-srcs", - "//pkg/apis/core/validation:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS deleted file mode 100644 index 7e195eca..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS +++ /dev/null @@ -1,45 +0,0 @@ -approvers: -- erictune -- lavalamp -- smarterclayton -- thockin -- liggitt -# - bgrant0607 # manual escalations only -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- yujuhong -- brendandburns -- derekwaynecarr -- caesarxuchao -- vishh -- mikedanese -- liggitt -- nikhiljindal -- gmarek -- erictune -- davidopp -- pmorie -- sttts -- dchen1107 -- saad-ali -- zmerlynn -- luxas -- janetkuo -- justinsb -- pwittrock -- roberthbailey -- ncdc -- tallclair -- yifan-gu -- eparis -- mwielgus -- soltysh -- piosz -- jsafrane -- jbeda -labels: -- sig/apps diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go deleted file mode 100644 index a1e6daae..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// This file should be consistent with pkg/api/v1/annotation_key_constants.go. - -package core - -const ( - // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy - // webhook backend fails. - ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" - - // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation - PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" - - // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods - MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" - - // TolerationsAnnotationKey represents the key of tolerations data (json serialized) - // in the Annotations of a Pod. - TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" - - // TaintsAnnotationKey represents the key of taints data (json serialized) - // in the Annotations of a Node. - TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" - - // SeccompPodAnnotationKey represents the key of a seccomp profile applied - // to all containers of a pod. - SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" - - // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied - // to one container of a pod. - SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - - // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime. - SeccompProfileRuntimeDefault string = "runtime/default" - - // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. - // This is now deprecated and should be replaced by SeccompProfileRuntimeDefault. - DeprecatedSeccompProfileDockerDefault string = "docker/default" - - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) - // in the Annotations of a Node. - PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache - // an object (e.g. secret, config map) before fetching it again from apiserver. - // This annotation can be attached to node. - ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - - // BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by - // the kubelet prior to running - BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint" - - // annotation key prefix used to identify non-convertible json paths. - NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" - - kubectlPrefix = "kubectl.kubernetes.io/" - - // LastAppliedConfigAnnotation is the annotation used to store the previous - // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. - LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" - - // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers - // - // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to - // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow - // access only from the CIDRs currently allocated to MIT & the USPS. - // - // Not all cloud providers support this annotation, though AWS & GCE do. 
- AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go deleted file mode 100644 index a26f8056..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -// Field path constants that are specific to the internal API -// representation. -const ( - NodeUnschedulableField = "spec.unschedulable" - ObjectNameField = "metadata.name" - PodHostField = "spec.nodeName" - PodStatusField = "status.phase" - SecretTypeField = "type" - - EventReasonField = "action" - EventSourceField = "reportingComponent" - EventTypeField = "type" - EventInvolvedKindField = "involvedObject.kind" - EventInvolvedNamespaceField = "involvedObject.namespace" - EventInvolvedNameField = "involvedObject.name" - EventInvolvedUIDField = "involvedObject.uid" - EventInvolvedAPIVersionField = "involvedObject.apiVersion" - EventInvolvedResourceVersionField = "involvedObject.resourceVersion" - EventInvolvedFieldPathField = "involvedObject.fieldPath" -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD deleted file mode 100644 index d4fa9fea..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD +++ /dev/null @@ -1,51 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["helpers_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - importpath = "k8s.io/kubernetes/pkg/apis/core/helper", - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/apis/core/helper/qos:all-srcs", - ], - tags = ["automanaged"], -) diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go deleted file mode 100644 index 48612229..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go +++ /dev/null @@ -1,564 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -import ( - "encoding/json" - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/apis/core" -) - -// IsHugePageResourceName returns true if the resource name has the huge page -// resource prefix. -func IsHugePageResourceName(name core.ResourceName) bool { - return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) -} - -// IsQuotaHugePageResourceName returns true if the resource name has the quota -// related huge page resource prefix. -func IsQuotaHugePageResourceName(name core.ResourceName) bool { - return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix) -} - -// HugePageResourceName returns a ResourceName with the canonical hugepage -// prefix prepended for the specified page size. The page size is converted -// to its canonical representation. -func HugePageResourceName(pageSize resource.Quantity) core.ResourceName { - return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String())) -} - -// HugePageSizeFromResourceName returns the page size for the specified huge page -// resource name. If the specified input is not a valid huge page resource name -// an error is returned. -func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) { - if !IsHugePageResourceName(name) { - return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name) - } - pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix) - return resource.ParseQuantity(pageSize) -} - -// NonConvertibleFields iterates over the provided map and filters out all but -// any keys with the "non-convertible.kubernetes.io" prefix. -func NonConvertibleFields(annotations map[string]string) map[string]string { - nonConvertibleKeys := map[string]string{} - for key, value := range annotations { - if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) { - nonConvertibleKeys[key] = value - } - } - return nonConvertibleKeys -} - -// Semantic can do semantic deep equality checks for core objects. -// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true -var Semantic = conversion.EqualitiesOrDie( - func(a, b resource.Quantity) bool { - // Ignore formatting, only care that numeric value stayed the same. 
- // TODO: if we decide it's important, it should be safe to start comparing the format. - // - // Uninitialized quantities are equivalent to 0 quantities. - return a.Cmp(b) == 0 - }, - func(a, b metav1.MicroTime) bool { - return a.UTC() == b.UTC() - }, - func(a, b metav1.Time) bool { - return a.UTC() == b.UTC() - }, - func(a, b labels.Selector) bool { - return a.String() == b.String() - }, - func(a, b fields.Selector) bool { - return a.String() == b.String() - }, -) - -var standardResourceQuotaScopes = sets.NewString( - string(core.ResourceQuotaScopeTerminating), - string(core.ResourceQuotaScopeNotTerminating), - string(core.ResourceQuotaScopeBestEffort), - string(core.ResourceQuotaScopeNotBestEffort), - string(core.ResourceQuotaScopePriorityClass), -) - -// IsStandardResourceQuotaScope returns true if the scope is a standard value -func IsStandardResourceQuotaScope(str string) bool { - return standardResourceQuotaScopes.Has(str) -} - -var podObjectCountQuotaResources = sets.NewString( - string(core.ResourcePods), -) - -var podComputeQuotaResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), -) - -// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope -func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool { - switch scope { - case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort, core.ResourceQuotaScopePriorityClass: - return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) - case core.ResourceQuotaScopeBestEffort: - return podObjectCountQuotaResources.Has(resource) - default: - return true - } -} - -var standardContainerResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), -) - -// IsStandardContainerResourceName returns true if the container can make a resource request -// for the specified resource -func IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str)) -} - -// IsExtendedResourceName returns true if: -// 1. the resource name is not in the default namespace; -// 2. resource name does not have "requests." prefix, -// to avoid confusion with the convention in quota -// 3. it satisfies the rules in IsQualifiedName() after converted into quota resource name -func IsExtendedResourceName(name core.ResourceName) bool { - if IsNativeResource(name) || strings.HasPrefix(string(name), core.DefaultResourceRequestsPrefix) { - return false - } - // Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name - nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name)) - if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 { - return false - } - return true -} - -// IsNativeResource returns true if the resource name is in the -// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are -// implicitly in the kubernetes.io/ namespace. 
-func IsNativeResource(name core.ResourceName) bool { - return !strings.Contains(string(name), "/") || - strings.Contains(string(name), core.ResourceDefaultNamespacePrefix) -} - -// IsOvercommitAllowed returns true if the resource is in the default -// namespace and is not hugepages. -func IsOvercommitAllowed(name core.ResourceName) bool { - return IsNativeResource(name) && - !IsHugePageResourceName(name) -} - -var standardLimitRangeTypes = sets.NewString( - string(core.LimitTypePod), - string(core.LimitTypeContainer), - string(core.LimitTypePersistentVolumeClaim), -) - -// IsStandardLimitRangeType returns true if the type is Pod or Container -func IsStandardLimitRangeType(str string) bool { - return standardLimitRangeTypes.Has(str) -} - -var standardQuotaResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), - string(core.ResourceRequestsStorage), - string(core.ResourceRequestsEphemeralStorage), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceLimitsEphemeralStorage), - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceConfigMaps), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), -) - -// IsStandardQuotaResourceName returns true if the resource is known to -// the quota tracking system -func IsStandardQuotaResourceName(str string) bool { - return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) -} - -var standardResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), - string(core.ResourceRequestsEphemeralStorage), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceLimitsEphemeralStorage), - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourceConfigMaps), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceStorage), - string(core.ResourceRequestsStorage), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), -) - -// IsStandardResourceName returns true if the resource is known to the system -func IsStandardResourceName(str string) bool { - return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) -} - -var integerResources = sets.NewString( - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourceConfigMaps), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), -) - -// IsIntegerResourceName returns true if the resource is measured in integer values -func IsIntegerResourceName(str string) bool { - return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) -} - -// this function aims to check if the service's ClusterIP is set or not -// the objective is not to perform validation here -func 
IsServiceIPSet(service *core.Service) bool { - return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != "" -} - -var standardFinalizers = sets.NewString( - string(core.FinalizerKubernetes), - metav1.FinalizerOrphanDependents, - metav1.FinalizerDeleteDependents, -) - -func IsStandardFinalizerName(str string) bool { - return standardFinalizers.Has(str) -} - -// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, -// only if they do not already exist -func AddToNodeAddresses(addresses *[]core.NodeAddress, addAddresses ...core.NodeAddress) { - for _, add := range addAddresses { - exists := false - for _, existing := range *addresses { - if existing.Address == add.Address && existing.Type == add.Type { - exists = true - break - } - } - if !exists { - *addresses = append(*addresses, add) - } - } -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusEqual(l, r *core.LoadBalancerStatus) bool { - return ingressSliceEqual(l.Ingress, r.Ingress) -} - -func ingressSliceEqual(lhs, rhs []core.LoadBalancerIngress) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if !ingressEqual(&lhs[i], &rhs[i]) { - return false - } - } - return true -} - -func ingressEqual(lhs, rhs *core.LoadBalancerIngress) bool { - if lhs.IP != rhs.IP { - return false - } - if lhs.Hostname != rhs.Hostname { - return false - } - return true -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX. -func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if containsAccessMode(modes, core.ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if containsAccessMode(modes, core.ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if containsAccessMode(modes, core.ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - return strings.Join(modesStr, ",") -} - -// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode { - strmodes := strings.Split(modes, ",") - accessModes := []core.PersistentVolumeAccessMode{} - for _, s := range strmodes { - s = strings.Trim(s, " ") - switch { - case s == "RWO": - accessModes = append(accessModes, core.ReadWriteOnce) - case s == "ROX": - accessModes = append(accessModes, core.ReadOnlyMany) - case s == "RWX": - accessModes = append(accessModes, core.ReadWriteMany) - } - } - return accessModes -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode { - accessModes := []core.PersistentVolumeAccessMode{} - for _, m := range modes { - if !containsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func containsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement core type into a struct that implements -// labels.Selector. 
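A brief usage sketch of the access-mode helpers deleted above. The import paths assume the pre-removal vendored layout of this repository, and the package name and printed values are illustrative only:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/apis/core"
        "k8s.io/kubernetes/pkg/apis/core/helper"
    )

    func main() {
        // Duplicates are removed and the output order is always RWO,ROX,RWX.
        modes := []core.PersistentVolumeAccessMode{core.ReadOnlyMany, core.ReadWriteOnce, core.ReadWriteOnce}
        fmt.Println(helper.GetAccessModesAsString(modes)) // "RWO,ROX"

        // The reverse helper parses the comma-separated form back into access modes.
        fmt.Println(helper.GetAccessModesFromString("RWO,RWX")) // [ReadWriteOnce ReadWriteMany]
    }
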
-func NodeSelectorRequirementsAsSelector(nsm []core.NodeSelectorRequirement) (labels.Selector, error) { - if len(nsm) == 0 { - return labels.Nothing(), nil - } - selector := labels.NewSelector() - for _, expr := range nsm { - var op selection.Operator - switch expr.Operator { - case core.NodeSelectorOpIn: - op = selection.In - case core.NodeSelectorOpNotIn: - op = selection.NotIn - case core.NodeSelectorOpExists: - op = selection.Exists - case core.NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case core.NodeSelectorOpGt: - op = selection.GreaterThan - case core.NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, expr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements -// fields.Selector. -func NodeSelectorRequirementsAsFieldSelector(nsm []core.NodeSelectorRequirement) (fields.Selector, error) { - if len(nsm) == 0 { - return fields.Nothing(), nil - } - - selectors := []fields.Selector{} - for _, expr := range nsm { - switch expr.Operator { - case core.NodeSelectorOpIn: - if len(expr.Values) != 1 { - return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q", - len(expr.Values), expr.Operator) - } - selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0])) - - case core.NodeSelectorOpNotIn: - if len(expr.Values) != 1 { - return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q", - len(expr.Values), expr.Operator) - } - selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0])) - - default: - return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator) - } - } - - return fields.AndSelectors(selectors...), nil -} - -// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations -// and converts it to the []Toleration type in core. -func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) { - var tolerations []core.Toleration - if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations) - if err != nil { - return tolerations, err - } - } - return tolerations, nil -} - -// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. -// Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool { - podTolerations := pod.Spec.Tolerations - - var newTolerations []core.Toleration - updated := false - for i := range podTolerations { - if toleration.MatchToleration(&podTolerations[i]) { - if Semantic.DeepEqual(toleration, podTolerations[i]) { - return false - } - newTolerations = append(newTolerations, *toleration) - updated = true - continue - } - - newTolerations = append(newTolerations, podTolerations[i]) - } - - if !updated { - newTolerations = append(newTolerations, *toleration) - } - - pod.Spec.Tolerations = newTolerations - return true -} - -// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations -// and converts it to the []Taint type in core. 
-func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) { - var taints []core.Taint - if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints) - if err != nil { - return []core.Taint{}, err - } - } - return taints, nil -} - -// GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *core.PersistentVolume) string { - // Use beta annotation first - if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found { - return class - } - - return volume.Spec.StorageClassName -} - -// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - return "" -} - -// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. -func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { - // Use beta annotation first - if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { - return true - } - - if claim.Spec.StorageClassName != nil { - return true - } - - return false -} - -// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements -// labels.Selector. -func ScopedResourceSelectorRequirementsAsSelector(ssr core.ScopedResourceSelectorRequirement) (labels.Selector, error) { - selector := labels.NewSelector() - var op selection.Operator - switch ssr.Operator { - case core.ScopeSelectorOpIn: - op = selection.In - case core.ScopeSelectorOpNotIn: - op = selection.NotIn - case core.ScopeSelectorOpExists: - op = selection.Exists - case core.ScopeSelectorOpDoesNotExist: - op = selection.DoesNotExist - default: - return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) - } - r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - return selector, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go deleted file mode 100644 index 937cd056..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/json.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import "encoding/json" - -// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations -// to prevent anyone from marshaling these internal structs. 
- -var _ = json.Marshaler(&AvoidPods{}) -var _ = json.Unmarshaler(&AvoidPods{}) - -func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } -func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go deleted file mode 100644 index 55b27f30..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. - -package core - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go deleted file mode 100644 index 1367e00e..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import ( - "k8s.io/apimachinery/pkg/api/resource" -) - -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. 
-func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) StorageEphemeral() *resource.Quantity { - if val, ok := (*self)[ResourceEphemeralStorage]; ok { - return &val - } - return &resource.Quantity{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go deleted file mode 100644 index ae1feb74..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. - -package core - -import "fmt" - -// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, -// if the two taints have same key:effect, regard as they match. -func (t *Taint) MatchTaint(taintToMatch Taint) bool { - return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect -} - -// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. -func (t *Taint) ToString() string { - if len(t.Value) == 0 { - return fmt.Sprintf("%v:%v", t.Key, t.Effect) - } - return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go deleted file mode 100644 index 1dfbc9f1..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. - -package core - -// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , -// if the two tolerations have same combination, regard as they match. -// TODO: uniqueness check for tolerations in api validations. 
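As a small illustration of the taint helpers removed above: ToString produces key=value:effect, or key:effect when the value is empty, and MatchTaint compares only key and effect. The taint values below are made up, and TaintEffectNoSchedule is assumed to come from the same core package:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/apis/core"
    )

    func main() {
        t := core.Taint{Key: "dedicated", Value: "gpu", Effect: core.TaintEffectNoSchedule}
        fmt.Println(t.ToString()) // "dedicated=gpu:NoSchedule"

        // MatchTaint ignores the value, so a taint with a different value still matches.
        other := core.Taint{Key: "dedicated", Value: "infra", Effect: core.TaintEffectNoSchedule}
        fmt.Println(t.MatchTaint(other)) // true
    }
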
-func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { - return t.Key == tolerationToMatch.Key && - t.Effect == tolerationToMatch.Effect && - t.Operator == tolerationToMatch.Operator && - t.Value == tolerationToMatch.Value -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go deleted file mode 100644 index 702ffa4d..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ /dev/null @@ -1,4696 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" - // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" - // NamespaceNone is the argument for a context when there is no namespace. - NamespaceNone string = "" - // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem string = "kube-system" - // NamespacePublic is the namespace where we place public info (ConfigMaps) - NamespacePublic string = "kube-public" - // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) - NamespaceNodeLease string = "kube-node-lease" - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" -) - -// Volume represents a named volume in a pod that may be accessed by any containers in the pod. -type Volume struct { - // Required: This must be a DNS_LABEL. Each volume in a pod must have - // a unique name. - Name string - // The VolumeSource represents the location and type of a volume to mount. - // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - // +optional - VolumeSource -} - -// VolumeSource represents the source location of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents file or directory on the host machine that is - // directly exposed to the container. This is generally used for system - // agents or other privileged things that are allowed to see the host - // machine. Most containers will NOT need this. - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - // +optional - HostPath *HostPathVolumeSource - // EmptyDir represents a temporary directory that shares a pod's lifetime. 
- // +optional - EmptyDir *EmptyDirVolumeSource - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // GitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - // into the Pod's container. - // +optional - GitRepo *GitRepoVolumeSource - // Secret represents a secret that should populate this volume. - // +optional - Secret *SecretVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIVolumeSource - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime - // +optional - Glusterfs *GlusterfsVolumeSource - // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace - // +optional - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. - // +optional - FlexVolume *FlexVolumeSource - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - - // CephFS represents a Cephfs mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSVolumeSource - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - - // DownwardAPI represents metadata about the pod that should populate this volume - // +optional - DownwardAPI *DownwardAPIVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFileVolumeSource - // ConfigMap represents a configMap that should populate this volume - // +optional - ConfigMap *ConfigMapVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
- // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // Items for all in one resources secrets, configmaps, and downward API - Projected *ProjectedVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // +optional - StorageOS *StorageOSVolumeSource -} - -// Similar to VolumeSource but meant for the administrator who creates PVs. -// Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // +optional - HostPath *HostPathVolumeSource - // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod - // +optional - Glusterfs *GlusterfsVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDPersistentVolumeSource - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIPersistentVolumeSource - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. - // +optional - FlexVolume *FlexPersistentVolumeSource - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderPersistentVolumeSource - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSPersistentVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFilePersistentVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
- // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOPersistentVolumeSource - // Local represents directly-attached storage with node affinity - // +optional - Local *LocalVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md - // +optional - StorageOS *StorageOSPersistentVolumeSource - // CSI (Container Storage Interface) represents storage that handled by an external CSI driver (Beta feature). - // +optional - CSI *CSIPersistentVolumeSource -} - -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume - ClaimName string - // Optional: Defaults to false (read/write). ReadOnly here - // will force the ReadOnly setting in VolumeMounts - // +optional - ReadOnly bool -} - -const ( - // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. - // It's deprecated and will be removed in a future release. (#51440) - BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" - - // MountOptionAnnotation defines mount option annotation used in PVs - MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolume struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - //Spec defines a persistent volume owned by the cluster - // +optional - Spec PersistentVolumeSpec - - // Status represents the current information about persistent volume. - // +optional - Status PersistentVolumeStatus -} - -type PersistentVolumeSpec struct { - // Resources represents the actual resources of the volume - Capacity ResourceList - // Source represents the location and type of a volume to mount. - PersistentVolumeSource - // AccessModes contains all ways the volume can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // ClaimRef is expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is - // ignored, i.e. labels of this PV do not need to match PVC selector. - // +optional - ClaimRef *ObjectReference - // Optional: what happens to a persistent volume when released from its claim. - // +optional - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy - // Name of StorageClass to which this persistent volume belongs. Empty value - // means that this volume does not belong to any StorageClass. - // +optional - StorageClassName string - // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will - // simply fail if one is invalid. 
- // +optional - MountOptions []string - // volumeMode defines if a volume is intended to be used with a formatted filesystem - // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. - // +optional - VolumeMode *PersistentVolumeMode - // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. - // This field influences the scheduling of pods that use this volume. - // +optional - NodeAffinity *VolumeNodeAffinity -} - -// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. -type VolumeNodeAffinity struct { - // Required specifies hard node constraints that must be met. - Required *NodeSelector -} - -// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes -type PersistentVolumeReclaimPolicy string - -const ( - // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. - // The volume plugin must support Recycling. - // DEPRECATED: The PersistentVolumeReclaimRecycle called Recycle is being deprecated. See announcement here: https://groups.google.com/forum/#!topic/kubernetes-dev/uexugCza84I - PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" - // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. - // The volume plugin must support Deletion. - PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. - // The default policy is Retain. - PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" -) - -// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. -type PersistentVolumeMode string - -const ( - // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. - PersistentVolumeBlock PersistentVolumeMode = "Block" - // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. - PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" -) - -type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim - // +optional - Phase PersistentVolumePhase - // A human-readable message indicating details about why the volume is in this state. 
- // +optional - Message string - // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI - // +optional - Reason string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolumeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolume -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -type PersistentVolumeClaim struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the volume requested by a pod author - // +optional - Spec PersistentVolumeClaimSpec - - // Status represents the current information about a claim - // +optional - Status PersistentVolumeClaimStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolumeClaimList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolumeClaim -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -type PersistentVolumeClaimSpec struct { - // Contains the types of access modes required - // +optional - AccessModes []PersistentVolumeAccessMode - // A label query over volumes to consider for binding. This selector is - // ignored when VolumeName is set - // +optional - Selector *metav1.LabelSelector - // Resources represents the minimum resources required - // +optional - Resources ResourceRequirements - // VolumeName is the binding reference to the PersistentVolume backing this - // claim. When set to non-empty value Selector is not evaluated - // +optional - VolumeName string - // Name of the StorageClass required by the claim. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 - // +optional - StorageClassName *string - // volumeMode defines what type of volume is required by the claim. - // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. - // +optional - VolumeMode *PersistentVolumeMode - // This field requires the VolumeSnapshotDataSource alpha feature gate to be - // enabled and currently VolumeSnapshot is the only supported data source. - // If the provisioner can support VolumeSnapshot data source, it will create - // a new volume and data will be restored to the volume at the same time. - // If the provisioner does not support VolumeSnapshot data source, volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. 
- // +optional - DataSource *TypedLocalObjectReference -} - -type PersistentVolumeClaimConditionType string - -// These are valid conditions of Pvc -const ( - // An user trigger resize of pvc has been started - PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" - // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node - PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" -) - -type PersistentVolumeClaimCondition struct { - Type PersistentVolumeClaimConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim - // +optional - Phase PersistentVolumeClaimPhase - // AccessModes contains all ways the volume backing the PVC can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // Represents the actual resources of the underlying volume - // +optional - Capacity ResourceList - // +optional - Conditions []PersistentVolumeClaimCondition -} - -type PersistentVolumeAccessMode string - -const ( - // can be mounted read/write mode to exactly 1 host - ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" - // can be mounted in read-only mode to many hosts - ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" - // can be mounted in read/write mode to many hosts - ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" -) - -type PersistentVolumePhase string - -const ( - // used for PersistentVolumes that are not available - VolumePending PersistentVolumePhase = "Pending" - // used for PersistentVolumes that are not yet bound - // Available volumes are held by the binder and matched to PersistentVolumeClaims - VolumeAvailable PersistentVolumePhase = "Available" - // used for PersistentVolumes that are bound - VolumeBound PersistentVolumePhase = "Bound" - // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted - // released volumes must be recycled before becoming available again - // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource - VolumeReleased PersistentVolumePhase = "Released" - // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim - VolumeFailed PersistentVolumePhase = "Failed" -) - -type PersistentVolumeClaimPhase string - -const ( - // used for PersistentVolumeClaims that are not yet bound - ClaimPending PersistentVolumeClaimPhase = "Pending" - // used for PersistentVolumeClaims that are bound - ClaimBound PersistentVolumeClaimPhase = "Bound" - // used for PersistentVolumeClaims that lost their underlying - // PersistentVolume. The claim was bound to a PersistentVolume and this - // volume does not exist any longer and all data on it was lost. - ClaimLost PersistentVolumeClaimPhase = "Lost" -) - -type HostPathType string - -const ( - // For backwards compatible, leave it empty if unset - HostPathUnset HostPathType = "" - // If nothing exists at the given path, an empty directory will be created there - // as needed with file mode 0755, having the same group and ownership with Kubelet. 
- HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" - // A directory must exist at the given path - HostPathDirectory HostPathType = "Directory" - // If nothing exists at the given path, an empty file will be created there - // as needed with file mode 0644, having the same group and ownership with Kubelet. - HostPathFileOrCreate HostPathType = "FileOrCreate" - // A file must exist at the given path - HostPathFile HostPathType = "File" - // A UNIX socket must exist at the given path - HostPathSocket HostPathType = "Socket" - // A character device must exist at the given path - HostPathCharDev HostPathType = "CharDevice" - // A block device must exist at the given path - HostPathBlockDev HostPathType = "BlockDevice" -) - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - // If the path is a symlink, it will follow the link to the real path. - Path string - // Defaults to "" - Type *HostPathType -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // TODO: Longer term we want to represent the selection of underlying - // media more like a scheduling problem - user says what traits they - // need, we give them a backing store that satisfies that. For now - // this will cover the most common needs. - // Optional: what type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // +optional - Medium StorageMedium - // Total amount of local storage required for this EmptyDir volume. - // The size limit is also applicable for memory medium. - // The maximum usage on memory medium EmptyDir would be the minimum value between - // the SizeLimit specified here and the sum of memory limits of all containers in a pod. - // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir - // +optional - SizeLimit *resource.Quantity -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) - StorageMediumHugePages StorageMedium = "HugePages" // use hugepages -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" - // ProtocolSCTP is the SCTP protocol. - ProtocolSCTP Protocol = "SCTP" -) - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource. Used to identify the disk in GCE - PDName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIVolumeSource struct { - // Required: iSCSI target portal - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - TargetPortal string - // Required: target iSCSI Qualified Name - // +optional - IQN string - // Required: iSCSI target lun number - // +optional - Lun int32 - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - // +optional - ISCSIInterface string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: list of iSCSI target portal ips for high availability. - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - Portals []string - // Optional: whether support iSCSI Discovery CHAP authentication - // +optional - DiscoveryCHAPAuth bool - // Optional: whether support iSCSI Session CHAP authentication - // +optional - SessionCHAPAuth bool - // Optional: CHAP secret for iSCSI target and initiator authentication. - // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true - // +optional - SecretRef *LocalObjectReference - // Optional: Custom initiator name per volume. - // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - InitiatorName *string -} - -// ISCSIPersistentVolumeSource represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIPersistentVolumeSource struct { - // Required: iSCSI target portal - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - TargetPortal string - // Required: target iSCSI Qualified Name - // +optional - IQN string - // Required: iSCSI target lun number - // +optional - Lun int32 - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - // +optional - ISCSIInterface string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. 
- // +optional - ReadOnly bool - // Optional: list of iSCSI target portal ips for high availability. - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - Portals []string - // Optional: whether support iSCSI Discovery CHAP authentication - // +optional - DiscoveryCHAPAuth bool - // Optional: whether support iSCSI Session CHAP authentication - // +optional - SessionCHAPAuth bool - // Optional: CHAP secret for iSCSI target and initiator authentication. - // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true - // +optional - SecretRef *SecretReference - // Optional: Custom initiator name per volume. - // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - InitiatorName *string -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -type FCVolumeSource struct { - // Optional: FC target worldwide names (WWNs) - // +optional - TargetWWNs []string - // Optional: FC target lun number - // +optional - Lun *int32 - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: FC volume World Wide Identifiers (WWIDs) - // Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously. - // +optional - WWIDs []string -} - -// FlexPersistentVolumeSource represents a generic persistent volume resource that is -// provisioned/attached using an exec based plugin. -type FlexPersistentVolumeSource struct { - // Driver is the name of the driver to use for this volume. - Driver string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - FSType string - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - SecretRef *SecretReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: Extra driver options if any. - // +optional - Options map[string]string -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. -type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. - Driver string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - FSType string - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. 
If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: Extra driver options if any. - // +optional - Options map[string]string -} - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique id of the persistent disk resource. Used to identify the disk in AWS - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -// -// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an -// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir -// into the Pod's container. -type GitRepoVolumeSource struct { - // Repository URL - Repository string - // Commit hash, this is optional - // +optional - Revision string - // Clone target, this is optional - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - // +optional - Directory string - // TODO: Consider credentials here. -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. - // +optional - SecretName string - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. 
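A SecretVolumeSource is normally paired with KeyToPath items (defined further below) to control which keys are projected and where. Illustrative sketch against the corev1 mirror; the Secret name "example-tls" is hypothetical.

package example

import corev1 "k8s.io/api/core/v1"

// tlsSecretVolume projects two keys of a Secret into files under certs/,
// readable only by the owning user (mode 0400).
func tlsSecretVolume() corev1.Volume {
	defaultMode := int32(0400)
	return corev1.Volume{
		Name: "tls",
		VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{
				SecretName: "example-tls", // hypothetical Secret name
				Items: []corev1.KeyToPath{
					{Key: "tls.crt", Path: "certs/tls.crt"},
					{Key: "tls.key", Path: "certs/tls.key"},
				},
				DefaultMode: &defaultMode,
			},
		},
	}
}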
- // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Adapts a secret into a projected volume. -// -// The contents of the target Secret's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names. -// Note that this is identical to a secret volume source without the default -// mode. -type SecretProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server - Server string - - // Path is the exported NFS share - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the NFS export to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -type QuobyteVolumeSource struct { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - Registry string - - // Volume is a string that references an already created Quobyte volume by name. - Volume string - - // Defaults to false (read/write). ReadOnly here will force - // the Quobyte to be mounted with read-only permissions - // +optional - ReadOnly bool - - // User to map volume access to - // Defaults to the root user - // +optional - User string - - // Group to map volume access to - // Default is no group - // +optional - Group string -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsVolumeSource struct { - // Required: EndpointsName is the endpoint name that details Glusterfs topology - EndpointsName string - - // Required: Path is the Glusterfs volume path - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the Glusterfs to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -type RBDVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string - // Required: RBDImage is the rados image name - RBDImage string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: RadosPool is the rados pool name,default is rbd - // +optional - RBDPool string - // Optional: RBDUser is the rados user name, default is admin - // +optional - RadosUser string - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - // +optional - Keyring string - // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -type RBDPersistentVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string - // Required: RBDImage is the rados image name - RBDImage string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: RadosPool is the rados pool name,default is rbd - // +optional - RBDPool string - // Optional: RBDUser is the rados user name, default is admin - // +optional - RadosUser string - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - // +optional - Keyring string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *SecretReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderVolumeSource struct { - // Unique id of the volume used to identify the cinder volume - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: points to a secret object containing parameters used to connect - // to OpenStack. - // +optional - SecretRef *LocalObjectReference -} - -// Represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderPersistentVolumeSource struct { - // Unique id of the volume used to identify the cinder volume - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Optional: Defaults to false (read/write). 
ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: points to a secret object containing parameters used to connect - // to OpenStack. - // +optional - SecretRef *SecretReference -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// SecretReference represents a Secret Reference. It has enough information to retrieve secret -// in any namespace -type SecretReference struct { - // Name is unique within a namespace to reference a secret resource. - // +optional - Name string - // Namespace defines the space within which the secret name must be unique. - // +optional - Namespace string -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSPersistentVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *SecretReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Flocker volume mounted by the Flocker agent. -// One and only one of datasetName and datasetUUID should be set. -// Flocker volumes do not support ownership management or SELinux relabeling. -type FlockerVolumeSource struct { - // Name of the dataset stored as metadata -> name on the dataset for Flocker - // should be considered as deprecated - // +optional - DatasetName string - // UUID of the dataset. This is unique identifier of a Flocker dataset - // +optional - DatasetUUID string -} - -// Represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -type DownwardAPIVolumeSource struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. 
- // +optional - DefaultMode *int32 -} - -// Represents a single file containing information from the downward API -type DownwardAPIVolumeFile struct { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string - // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Represents downward API info for projecting into a projected volume. -// Note that this is identical to a downwardAPI volume source without the default -// mode. -type DownwardAPIProjection struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFilePersistentVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // the namespace of the secret that contains Azure Storage Account Name and Key - // default is the same as the Pod - // +optional - SecretNamespace *string -} - -// Represents a vSphere volume resource. -type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk - VolumePath string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Storage Policy Based Management (SPBM) profile name. - // +optional - StoragePolicyName string - // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. - // +optional - StoragePolicyID string -} - -// Represents a Photon Controller persistent disk resource. -type PhotonPersistentDiskVolumeSource struct { - // ID that identifies Photon Controller persistent disk - PdID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string -} - -// PortworxVolumeSource represents a Portworx volume resource. 
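DownwardAPIVolumeFile entries combine a path with either a FieldRef or a ResourceFieldRef, as described above. A short corev1-based sketch projecting pod labels and a CPU limit into files; the container name "app" is hypothetical.

package example

import corev1 "k8s.io/api/core/v1"

// podInfoVolume exposes the pod's labels and the "app" container's CPU limit
// as files via the downward API.
func podInfoVolume() corev1.Volume {
	return corev1.Volume{
		Name: "podinfo",
		VolumeSource: corev1.VolumeSource{
			DownwardAPI: &corev1.DownwardAPIVolumeSource{
				Items: []corev1.DownwardAPIVolumeFile{
					{
						Path:     "labels",
						FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.labels"},
					},
					{
						Path: "cpu_limit",
						ResourceFieldRef: &corev1.ResourceFieldSelector{
							ContainerName: "app", // required when used in a volume file
							Resource:      "limits.cpu",
						},
					},
				},
			},
		},
	}
}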
-type PortworxVolumeSource struct { - // VolumeID uniquely identifies a Portworx volume - VolumeID string - // FSType represents the filesystem type to mount - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -type AzureDataDiskCachingMode string -type AzureDataDiskKind string - -const ( - AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" - AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" - AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" - - AzureSharedBlobDisk AzureDataDiskKind = "Shared" - AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" - AzureManagedDisk AzureDataDiskKind = "Managed" -) - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -type AzureDiskVolumeSource struct { - // The Name of the data disk in the blob storage - DiskName string - // The URI of the data disk in the blob storage - DataDiskURI string - // Host Caching mode: None, Read Only, Read Write. - // +optional - CachingMode *AzureDataDiskCachingMode - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType *string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly *bool - // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared - Kind *AzureDataDiskKind -} - -// ScaleIOVolumeSource represents a persistent ScaleIO volume -type ScaleIOVolumeSource struct { - // The host address of the ScaleIO API Gateway. - Gateway string - // The name of the storage system as configured in ScaleIO. - System string - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - SecretRef *LocalObjectReference - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool - // The name of the ScaleIO Protection Domain for the configured storage. - // +optional - ProtectionDomain string - // The ScaleIO Storage Pool associated with the protection domain. - // +optional - StoragePool string - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - // Default is ThinProvisioned. - // +optional - StorageMode string - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - VolumeName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // Default is "xfs". - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume that can be defined -// by a an admin via a storage class, for instance. -type ScaleIOPersistentVolumeSource struct { - // The host address of the ScaleIO API Gateway. 
- Gateway string - // The name of the storage system as configured in ScaleIO. - System string - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - SecretRef *SecretReference - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool - // The name of the ScaleIO Protection Domain for the configured storage. - // +optional - ProtectionDomain string - // The ScaleIO Storage Pool associated with the protection domain. - // +optional - StoragePool string - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - // Default is ThinProvisioned. - // +optional - StorageMode string - // The name of a volume created in the ScaleIO system - // that is associated with this volume source. - VolumeName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // Default is "xfs". - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a StorageOS persistent volume resource. -type StorageOSVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *LocalObjectReference -} - -// Represents a StorageOS persistent volume resource. -type StorageOSPersistentVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). 
ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *ObjectReference -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Adapts a ConfigMap into a projected volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names, -// unless the items element is populated with specific mappings of keys to paths. -// Note that this is identical to a configmap volume source without the default -// mode. -type ConfigMapProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// ServiceAccountTokenProjection represents a projected service account token -// volume. This projection can be used to insert a service account token into -// the pods runtime filesystem for use against APIs (Kubernetes API Server or -// otherwise). -type ServiceAccountTokenProjection struct { - // Audience is the intended audience of the token. A recipient of a token - // must identify itself with an identifier specified in the audience of the - // token, and otherwise should reject the token. The audience defaults to the - // identifier of the apiserver. - Audience string - // ExpirationSeconds is the requested duration of validity of the service - // account token. 
As the token approaches expiration, the kubelet volume - // plugin will proactively rotate the service account token. The kubelet will - // start trying to rotate the token if the token is older than 80 percent of - // its time to live or if the token is older than 24 hours.Defaults to 1 hour - // and must be at least 10 minutes. - ExpirationSeconds int64 - // Path is the path relative to the mount point of the file to project the - // token into. - Path string -} - -// Represents a projected volume source -type ProjectedVolumeSource struct { - // list of volume projections - Sources []VolumeProjection - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 -} - -// Projection that may be projected along with other supported volume types -type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - - // information about the secret data to project - Secret *SecretProjection - // information about the downwardAPI data to project - DownwardAPI *DownwardAPIProjection - // information about the configMap data to project - ConfigMap *ConfigMapProjection - // information about the serviceAccountToken data to project - ServiceAccountToken *ServiceAccountTokenProjection -} - -// Maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string - // Optional: mode bits to use on this file, should be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Local represents directly-attached storage with node affinity (Beta feature) -type LocalVolumeSource struct { - // The full path to the volume on the node. - // It can be either a directory or block device (disk, partition, ...). - Path string - - // Filesystem type to mount. - // It applies only when the Path is a block device. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. - // +optional - FSType *string -} - -// Represents storage that is managed by an external CSI volume driver (Beta feature) -type CSIPersistentVolumeSource struct { - // Driver is the name of the driver to use for this volume. - // Required. - Driver string - - // VolumeHandle is the unique volume name returned by the CSI volume - // plugin’s CreateVolume to refer to the volume on all subsequent calls. - // Required. - VolumeHandle string - - // Optional: The value to pass to ControllerPublishVolumeRequest. - // Defaults to false (read/write). - // +optional - ReadOnly bool - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // +optional - FSType string - - // Attributes of the volume to publish. 
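VolumeProjection lets several of the sources above (secret, downwardAPI, configMap, serviceAccountToken) share one mount point. A sketch against the corev1 mirror, assuming a cluster recent enough to serve projected service account tokens; the audience string, token path, and Secret name are illustrative.

package example

import corev1 "k8s.io/api/core/v1"

// apiAccessVolume combines a short-lived service account token with a Secret
// under a single projected mount.
func apiAccessVolume() corev1.Volume {
	expiry := int64(3600) // one hour, the documented default
	return corev1.Volume{
		Name: "kube-api-access",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{
						ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
							Audience:          "https://kubernetes.default.svc", // illustrative audience
							ExpirationSeconds: &expiry,
							Path:              "token",
						},
					},
					{
						Secret: &corev1.SecretProjection{
							LocalObjectReference: corev1.LocalObjectReference{Name: "example-ca"}, // hypothetical
							Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
						},
					},
				},
			},
		},
	}
}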
- // +optional - VolumeAttributes map[string]string - - // ControllerPublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - ControllerPublishSecretRef *SecretReference - - // NodeStageSecretRef is a reference to the secret object containing sensitive - // information to pass to the CSI driver to complete the CSI NodeStageVolume - // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - NodeStageSecretRef *SecretReference - - // NodePublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - NodePublishSecretRef *SecretReference -} - -// ContainerPort represents a network port in a single container -type ContainerPort struct { - // Optional: If specified, this must be an IANA_SVC_NAME Each named port - // in a pod must have a unique name. - // +optional - Name string - // Optional: If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // +optional - HostPort int32 - // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int32 - // Required: Supports "TCP", "UDP" and "SCTP" - // +optional - Protocol Protocol - // Optional: What host IP to bind the external port to. - // +optional - HostIP string -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // Required: This must match the Name of a Volume [above]. - Name string - // Optional: Defaults to false (read-write). - // +optional - ReadOnly bool - // Required. If the path is not an absolute path (e.g. some/path) it - // will be prepended with the appropriate root prefix for the operating - // system. On Linux this is '/', on Windows this is 'C:\'. - MountPath string - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - // +optional - SubPath string - // mountPropagation determines how mounts are propagated from the host - // to container and the other way around. - // When not set, MountPropagationNone is used. - // This field is beta in 1.10. - // +optional - MountPropagation *MountPropagationMode -} - -// MountPropagationMode describes mount propagation. -type MountPropagationMode string - -const ( - // MountPropagationNone means that the volume in a container will - // not receive new mounts from the host or other containers, and filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode corresponds to "private" in Linux terminology. 
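ContainerPort and VolumeMount are the container-side counterparts of the volume sources above; MountPropagation controls whether host mounts become visible inside the container. Illustrative corev1 sketch with made-up names and paths.

package example

import corev1 "k8s.io/api/core/v1"

// sidecarMounts declares one TCP port and one volume mount that receives new
// host mounts (HostToContainer propagation) but does not propagate them back.
func sidecarMounts() ([]corev1.ContainerPort, []corev1.VolumeMount) {
	propagation := corev1.MountPropagationHostToContainer
	ports := []corev1.ContainerPort{
		{Name: "metrics", ContainerPort: 9100, Protocol: corev1.ProtocolTCP},
	}
	mounts := []corev1.VolumeMount{
		{
			Name:             "host-data", // must match a Volume name in the pod spec
			MountPath:        "/data",
			SubPath:          "exports", // mount only this subdirectory of the volume
			MountPropagation: &propagation,
		},
	}
	return ports, mounts
}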
- MountPropagationNone MountPropagationMode = "None" - // MountPropagationHostToContainer means that the volume in a container will - // receive new mounts from the host or other containers, but filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rslave" in Linux terminology). - MountPropagationHostToContainer MountPropagationMode = "HostToContainer" - // MountPropagationBidirectional means that the volume in a container will - // receive new mounts from the host or other containers, and its own mounts - // will be propagated from the container to the host or other containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rshared" in Linux terminology). - MountPropagationBidirectional MountPropagationMode = "Bidirectional" -) - -// VolumeDevice describes a mapping of a raw block device within a container. -type VolumeDevice struct { - // name must match the name of a persistentVolumeClaim in the pod - Name string - // devicePath is the path inside of the container that the device will be mapped to. - DevicePath string -} - -// EnvVar represents an environment variable present in a Container. -type EnvVar struct { - // Required: This must be a C_IDENTIFIER. - Name string - // Optional: no more than one of the following may be specified. - // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // +optional - Value string - // Optional: Specifies a source the value of this var should come from. - // +optional - ValueFrom *EnvVarSource -} - -// EnvVarSource represents a source for the value of an EnvVar. -// Only one of its fields may be set. -type EnvVarSource struct { - // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Selects a key of a ConfigMap. - // +optional - ConfigMapKeyRef *ConfigMapKeySelector - // Selects a key of a secret in the pod's namespace. - // +optional - SecretKeyRef *SecretKeySelector -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -type ObjectFieldSelector struct { - // Required: Version of the schema the FieldPath is written in terms of. - // If no value is specified, it will be defaulted to the APIVersion of the - // enclosing object. 
- APIVersion string - // Required: Path of the field to select in the specified API version - FieldPath string -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -type ResourceFieldSelector struct { - // Container name: required for volumes, optional for env vars - // +optional - ContainerName string - // Required: resource to select - Resource string - // Specifies the output format of the exposed resources, defaults to "1" - // +optional - Divisor resource.Quantity -} - -// Selects a key from a ConfigMap. -type ConfigMapKeySelector struct { - // The ConfigMap to select from. - LocalObjectReference - // The key to select. - Key string - // Specify whether the ConfigMap or it's key must be defined - // +optional - Optional *bool -} - -// SecretKeySelector selects a key of a Secret. -type SecretKeySelector struct { - // The name of the secret in the pod's namespace to select from. - LocalObjectReference - // The key of the secret to select from. Must be a valid secret key. - Key string - // Specify whether the Secret or it's key must be defined - // +optional - Optional *bool -} - -// EnvFromSource represents the source of a set of ConfigMaps -type EnvFromSource struct { - // An optional identifier to prepend to each key in the ConfigMap. - // +optional - Prefix string - // The ConfigMap to select from. - //+optional - ConfigMapRef *ConfigMapEnvSource - // The Secret to select from. - //+optional - SecretRef *SecretEnvSource -} - -// ConfigMapEnvSource selects a ConfigMap to populate the environment -// variables with. -// -// The contents of the target ConfigMap's Data field will represent the -// key-value pairs as environment variables. -type ConfigMapEnvSource struct { - // The ConfigMap to select from. - LocalObjectReference - // Specify whether the ConfigMap must be defined - // +optional - Optional *bool -} - -// SecretEnvSource selects a Secret to populate the environment -// variables with. -// -// The contents of the target Secret's Data field will represent the -// key-value pairs as environment variables. -type SecretEnvSource struct { - // The Secret to select from. - LocalObjectReference - // Specify whether the Secret must be defined - // +optional - Optional *bool -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name - Name string - // The header field value - Value string -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Optional: Path to access on the HTTP server. - // +optional - Path string - // Required: Name or number of the port to access on the container. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. You - // probably want to set "Host" in httpHeaders instead. - // +optional - Host string - // Optional: Scheme to use for connecting to the host, defaults to HTTP. - // +optional - Scheme URIScheme - // Optional: Custom headers to set in the request. HTTP allows repeated headers. 
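EnvVarSource and EnvFromSource cover the common ways of feeding configuration into a container: literal values, single keys via fieldRef or secretKeyRef, and whole ConfigMaps. Illustrative corev1 sketch; the Secret and ConfigMap names are hypothetical.

package example

import corev1 "k8s.io/api/core/v1"

// exampleEnv sets a literal value, a downward-API field, a secret key,
// and imports an entire ConfigMap with a prefix.
func exampleEnv() ([]corev1.EnvVar, []corev1.EnvFromSource) {
	env := []corev1.EnvVar{
		{Name: "LOG_LEVEL", Value: "info"},
		{
			Name: "POD_NAMESPACE",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
			},
		},
		{
			Name: "DB_PASSWORD",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: &corev1.SecretKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{Name: "db-credentials"}, // hypothetical
					Key:                  "password",
				},
			},
		},
	}
	envFrom := []corev1.EnvFromSource{
		{
			Prefix: "APP_", // every key in the ConfigMap becomes APP_<key>
			ConfigMapRef: &corev1.ConfigMapEnvSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"}, // hypothetical
			},
		},
	}
	return env, envFrom
}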
- // +optional - HTTPHeaders []HTTPHeader -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Required: Port to connect to. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. - // +optional - Host string -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // +optional - Command []string -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - Handler - // Length of time before health checking is activated. In seconds. - // +optional - InitialDelaySeconds int32 - // Length of time before health checking times out. In seconds. - // +optional - TimeoutSeconds int32 - // How often (in seconds) to perform the probe. - // +optional - PeriodSeconds int32 - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Must be 1 for liveness. - // +optional - SuccessThreshold int32 - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // +optional - FailureThreshold int32 -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present - PullNever PullPolicy = "Never" - // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - PullIfNotPresent PullPolicy = "IfNotPresent" -) - -// TerminationMessagePolicy describes how termination messages are retrieved from a container. -type TerminationMessagePolicy string - -const ( - // TerminationMessageReadFile is the default behavior and will set the container status message to - // the contents of the container's terminationMessagePath when the container exits. - TerminationMessageReadFile TerminationMessagePolicy = "File" - // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs - // for the container status message when the container exits with an error and the - // terminationMessagePath has no contents. - TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" -) - -// Capability represent POSIX capabilities type -type Capability string - -// Capabilities represent POSIX capabilities that can be added or removed to a running container. 
-type Capabilities struct { - // Added capabilities - // +optional - Add []Capability - // Removed capabilities - // +optional - Drop []Capability -} - -// ResourceRequirements describes the compute resource requirements. -type ResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // +optional - Limits ResourceList - // Requests describes the minimum amount of compute resources required. - // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value - // +optional - Requests ResourceList -} - -// Container represents a single container that is expected to be run on the host. -type Container struct { - // Required: This must be a DNS_LABEL. Each container in a pod must - // have a unique name. - Name string - // Required. - Image string - // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Command []string - // Optional: The docker image's cmd is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Args []string - // Optional: Defaults to Docker's default. - // +optional - WorkingDir string - // +optional - Ports []ContainerPort - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource - // +optional - Env []EnvVar - // Compute resource requirements. - // +optional - Resources ResourceRequirements - // +optional - VolumeMounts []VolumeMount - // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. - // +optional - VolumeDevices []VolumeDevice - // +optional - LivenessProbe *Probe - // +optional - ReadinessProbe *Probe - // +optional - Lifecycle *Lifecycle - // Required. - // +optional - TerminationMessagePath string - // +optional - TerminationMessagePolicy TerminationMessagePolicy - // Required: Policy for pulling images for this container - ImagePullPolicy PullPolicy - // Optional: SecurityContext defines the security options the container should be run with. - // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - // +optional - SecurityContext *SecurityContext - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. 
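Putting several of the preceding types together, a Container typically carries ports, resource requests/limits, and a readiness Probe. Sketch against the corev1 mirror; the image name is made up, and note that the k8s.io/api vintage contemporary with this patch embeds Handler in Probe (later releases renamed it ProbeHandler).

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// exampleContainer combines ports, resource requests/limits and an HTTP
// readiness probe into a single container spec.
func exampleContainer() corev1.Container {
	return corev1.Container{
		Name:            "app",
		Image:           "registry.example.com/app:1.0", // hypothetical image
		ImagePullPolicy: corev1.PullIfNotPresent,
		Ports: []corev1.ContainerPort{
			{ContainerPort: 8080, Protocol: corev1.ProtocolTCP},
		},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("100m"),
				corev1.ResourceMemory: resource.MustParse("128Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
		},
		ReadinessProbe: &corev1.Probe{
			// Probe embeds Handler in this k8s.io/api vintage; releases >= v1.23
			// renamed the embedded field to ProbeHandler.
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
			},
			PeriodSeconds:    10,
			FailureThreshold: 3,
		},
	}
}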
- // +optional - Stdin bool - // +optional - StdinOnce bool - // +optional - TTY bool -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - Exec *ExecAction - // HTTPGet specifies the http request to perform. - // +optional - HTTPGet *HTTPGetAction - // TCPSocket specifies an action involving a TCP port. - // TODO: implement a realistic TCP lifecycle hook - // +optional - TCPSocket *TCPSocketAction -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, the container - // is terminated and restarted. - // +optional - PostStart *Handler - // PreStop is called immediately before a container is terminated. The reason for termination is - // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. - // +optional - PreStop *Handler -} - -// The below types are used by kube_client and api_server. - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; -// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -type ContainerStateWaiting struct { - // A brief CamelCase string indicating details about why the container is in waiting state. - // +optional - Reason string - // A human-readable message indicating details about why the container is in waiting state. - // +optional - Message string -} - -type ContainerStateRunning struct { - // +optional - StartedAt metav1.Time -} - -type ContainerStateTerminated struct { - ExitCode int32 - // +optional - Signal int32 - // +optional - Reason string - // +optional - Message string - // +optional - StartedAt metav1.Time - // +optional - FinishedAt metav1.Time - // +optional - ContainerID string -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - // +optional - Waiting *ContainerStateWaiting - // +optional - Running *ContainerStateRunning - // +optional - Terminated *ContainerStateTerminated -} - -type ContainerStatus struct { - // Each container in a pod must have a unique name. - Name string - // +optional - State ContainerState - // +optional - LastTerminationState ContainerState - // Ready specifies whether the container has passed its readiness check. - Ready bool - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. 
- RestartCount int32 - Image string - ImageID string - // +optional - ContainerID string -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. -const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - PodUnknown PodPhase = "Unknown" -) - -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" - // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler - // can't schedule the pod right now, for example due to insufficient resources in the cluster. - PodReasonUnschedulable = "Unschedulable" - // ContainersReady indicates whether all containers in the pod are ready. - ContainersReady PodConditionType = "ContainersReady" -) - -type PodCondition struct { - Type PodConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Pod -} - -// DNSPolicy defines how a pod's DNS will be configured. -type DNSPolicy string - -const ( - // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default - // (as determined by kubelet) DNS settings. 
- DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" - - // DNSClusterFirst indicates that the pod should use cluster DNS - // first unless hostNetwork is true, if it is available, then - // fall back on the default (as determined by kubelet) DNS settings. - DNSClusterFirst DNSPolicy = "ClusterFirst" - - // DNSDefault indicates that the pod should use the default (as - // determined by kubelet) DNS settings. - DNSDefault DNSPolicy = "Default" - - // DNSNone indicates that the pod should use empty DNS settings. DNS - // parameters such as nameservers and search paths should be defined via - // DNSConfig. - DNSNone DNSPolicy = "None" -) - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -type NodeSelector struct { - //Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm -} - -// A null or empty node selector term matches no objects. The requirements of -// them are ANDed. -// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. -type NodeSelectorTerm struct { - // A list of node selector requirements by node's labels. - MatchExpressions []NodeSelectorRequirement - // A list of node selector requirements by node's fields. - MatchFields []NodeSelectorRequirement -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -type NodeSelectorRequirement struct { - // The label key that the selector applies to. - Key string - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - Operator NodeSelectorOperator - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - Values []string -} - -// A node selector operator is the set of operators that can be used in -// a node selector requirement. -type NodeSelectorOperator string - -const ( - NodeSelectorOpIn NodeSelectorOperator = "In" - NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" - NodeSelectorOpExists NodeSelectorOperator = "Exists" - NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" - NodeSelectorOpGt NodeSelectorOperator = "Gt" - NodeSelectorOpLt NodeSelectorOperator = "Lt" -) - -// A topology selector term represents the result of label queries. -// A null or empty topology selector term matches no objects. -// The requirements of them are ANDed. -// It provides a subset of functionality as NodeSelectorTerm. -// This is an alpha feature and may change in the future. -type TopologySelectorTerm struct { - // A list of topology selector requirements by labels. - // +optional - MatchLabelExpressions []TopologySelectorLabelRequirement -} - -// A topology selector requirement is a selector that matches given label. -// This is an alpha feature and may change in the future. -type TopologySelectorLabelRequirement struct { - // The label key that the selector applies to. - Key string - // An array of string values. One value must match the label to be selected. - // Each entry in Values is ORed. 
- Values []string -} - -// Affinity is a group of affinity scheduling rules. -type Affinity struct { - // Describes node affinity scheduling rules for the pod. - // +optional - NodeAffinity *NodeAffinity - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAffinity *PodAffinity - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAntiAffinity *PodAntiAffinity -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -type WeightedPodAffinityTerm struct { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - Weight int32 - // Required. A pod affinity term, associated with the corresponding weight. - PodAffinityTerm PodAffinityTerm -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key <topologyKey> matches that of any node on which -// a pod of the set of pods is running. -type PodAffinityTerm struct { - // A label query over a set of resources, in this case pods. - // +optional - LabelSelector *metav1.LabelSelector - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // null or empty list means "this pod's namespace" - // +optional - Namespaces []string - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // Empty topologyKey is not allowed. - TopologyKey string -} - -// Node affinity is a group of node affinity scheduling rules. -type NodeAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // will try to eventually evict the pod from its node.
- // +optional - // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -type PreferredSchedulingTerm struct { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - Weight int32 - // A node selector term, associated with the corresponding weight. - Preference NodeSelectorTerm -} - -// The node this Taint is attached to has the "effect" on -// any pod that does not tolerate the Taint. -type Taint struct { - // Required. The taint key to be applied to a node. - Key string - // Required. The taint value corresponding to the taint key. - // +optional - Value string - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. - Effect TaintEffect - // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. - // +optional - TimeAdded *metav1.Time -} - -type TaintEffect string - -const ( - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // but allow all pods submitted to Kubelet without going through the scheduler - // to start, and allow all already-running pods to continue running. - // Enforced by the scheduler. - TaintEffectNoSchedule TaintEffect = "NoSchedule" - // Like TaintEffectNoSchedule, but the scheduler tries not to schedule - // new pods onto the node, rather than prohibiting new pods from scheduling - // onto the node entirely. Enforced by the scheduler. - TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to - // Kubelet without going through the scheduler to start. - // Enforced by Kubelet and the scheduler. - // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" - - // Evict any already-running pods that do not tolerate the taint. - // Currently enforced by NodeController. - TaintEffectNoExecute TaintEffect = "NoExecute" -) - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple <key,value,effect> using the matching operator <operator>.
-type Toleration struct { - // Key is the taint key that the toleration applies to. Empty means match all taint keys. - // If the key is empty, operator must be Exists; this combination means to match all values and all keys. - // +optional - Key string - // Operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - // +optional - Operator TolerationOperator - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - // +optional - Value string - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - // +optional - Effect TaintEffect - // TolerationSeconds represents the period of time the toleration (which must be - // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - // it is not set, which means tolerate the taint forever (do not evict). Zero and - // negative values will be treated as 0 (evict immediately) by the system. - // +optional - TolerationSeconds *int64 -} - -// A toleration operator is the set of operators that can be used in a toleration. -type TolerationOperator string - -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -// PodReadinessGate contains the reference to a pod condition -type PodReadinessGate struct { - // ConditionType refers to a condition in the pod's condition list with matching type. - ConditionType PodConditionType -} - -// PodSpec is a description of a pod -type PodSpec struct { - Volumes []Volume - // List of initialization containers belonging to the pod. - InitContainers []Container - // List of containers belonging to the pod. - Containers []Container - // +optional - RestartPolicy RestartPolicy - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // +optional - TerminationGracePeriodSeconds *int64 - // Optional duration in seconds relative to the StartTime that the pod may be active on a node - // before the system actively tries to terminate the pod; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 - // Set DNS policy for the pod. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. 
- // +optional - DNSPolicy DNSPolicy - // NodeSelector is a selector which must be true for the pod to fit on a node - // +optional - NodeSelector map[string]string - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod - // The pod will be allowed to use secrets referenced by the ServiceAccount - ServiceAccountName string - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - // +optional - AutomountServiceAccountToken *bool - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - NodeName string - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - SecurityContext *PodSecurityContext - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // +optional - ImagePullSecrets []LocalObjectReference - // Specifies the hostname of the Pod. - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - Hostname string - // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". - // If not specified, the pod will not have a domainname at all. - // +optional - Subdomain string - // If specified, the pod's scheduling constraints - // +optional - Affinity *Affinity - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string - // If specified, the pod's tolerations. - // +optional - Tolerations []Toleration - // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. - // +optional - HostAliases []HostAlias - // If specified, indicates the pod's priority. "system-node-critical" and - // "system-cluster-critical" are two special keywords which indicate the - // highest priorities with the former being the highest priority. Any other - // name must be defined by creating a PriorityClass object with that name. - // If not specified, the pod priority will be default or zero if there is no - // default. - // +optional - PriorityClassName string - // The priority value. Various system components use this field to find the - // priority of the pod. When Priority Admission Controller is enabled, it - // prevents users from setting this field. The admission controller populates - // this field from PriorityClassName. - // The higher the value, the higher the priority. - // +optional - Priority *int32 - // Specifies the DNS parameters of a pod. - // Parameters specified here will be merged to the generated DNS - // configuration based on DNSPolicy. - // +optional - DNSConfig *PodDNSConfig - // If specified, all readiness gates will be evaluated for pod readiness.
- // A pod is ready when all its containers are ready AND - // all conditions specified in the readiness gates have status equal to "True" - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md - // +optional - ReadinessGates []PodReadinessGate - // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - // empty definition that uses the default runtime handler. - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md - // This is an alpha feature and may change in the future. - // +optional - RuntimeClassName *string -} - -// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the -// pod's hosts file. -type HostAlias struct { - IP string - Hostnames []string -} - -// Sysctl defines a kernel parameter to be set -type Sysctl struct { - // Name of a property to set - Name string - // Value of a property to set - Value string -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -type PodSecurityContext struct { - // Use the host's network namespace. If this option is set, the ports that will be - // used must be specified. - // Optional: Default to false - // +k8s:conversion-gen=false - // +optional - HostNetwork bool - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostPID bool - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostIPC bool - // Share a single process namespace between all of the containers in a pod. - // When this is set containers will be able to view and signal processes from other containers - // in the same pod, and the first process in each container will not be assigned PID 1. - // HostPID and ShareProcessNamespace cannot both be set. - // Optional: Default to false. - // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. - // +k8s:conversion-gen=false - // +optional - ShareProcessNamespace *bool - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - RunAsUser *int64 - // The GID to run the entrypoint of the container process. - // Uses runtime default if unset. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. 
- // +optional - RunAsGroup *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - RunAsNonRoot *bool - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - // +optional - SupplementalGroups []int64 - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - // +optional - FSGroup *int64 - // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - // sysctls (by the container runtime) might fail to launch. - // +optional - Sysctls []Sysctl -} - -// PodQOSClass defines the supported qos classes of Pods. -type PodQOSClass string - -const ( - // PodQOSGuaranteed is the Guaranteed qos class. - PodQOSGuaranteed PodQOSClass = "Guaranteed" - // PodQOSBurstable is the Burstable qos class. - PodQOSBurstable PodQOSClass = "Burstable" - // PodQOSBestEffort is the BestEffort qos class. - PodQOSBestEffort PodQOSClass = "BestEffort" -) - -// PodDNSConfig defines the DNS parameters of a pod in addition to -// those generated from DNSPolicy. -type PodDNSConfig struct { - // A list of DNS name server IP addresses. - // This will be appended to the base nameservers generated from DNSPolicy. - // Duplicated nameservers will be removed. - // +optional - Nameservers []string - // A list of DNS search domains for host-name lookup. - // This will be appended to the base search paths generated from DNSPolicy. - // Duplicated search paths will be removed. - // +optional - Searches []string - // A list of DNS resolver options. - // This will be merged with the base options generated from DNSPolicy. - // Duplicated entries will be removed. Resolution options given in Options - // will override those that appear in the base DNSPolicy. - // +optional - Options []PodDNSConfigOption -} - -// PodDNSConfigOption defines DNS resolver options of a pod. -type PodDNSConfigOption struct { - // Required. - Name string - // +optional - Value *string -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type PodStatus struct { - // +optional - Phase PodPhase - // +optional - Conditions []PodCondition - // A human readable message indicating details about why the pod is in this state. - // +optional - Message string - // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' - // +optional - Reason string - // nominatedNodeName is set when this pod preempts other pods on the node, but it cannot be - // scheduled right away as preemption victims receive their graceful termination periods. 
- // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide - // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to - // give the resources on this node to a higher priority pod that is created after preemption. - // +optional - NominatedNodeName string - - // +optional - HostIP string - // +optional - PodIP string - - // Date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - StartTime *metav1.Time - // +optional - QOSClass PodQOSClass - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status - InitContainerStatuses []ContainerStatus - // The list has one entry per container in the manifest. Each entry is - // currently the output of `docker inspect`. This output format is *not* - // final and should not be relied upon. - // TODO: Make real decisions about what our info should look like. Re-enable fuzz test - // when we have done this. - // +optional - ContainerStatuses []ContainerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -type PodStatusResult struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Pod is a collection of containers, used as either input (create, update) or as output (list, get). -type Pod struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec - - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// PodTemplateSpec describes the data a pod should have when created from a template -type PodTemplateSpec struct { - // Metadata of the pods created from this template. - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplate describes a template for creating copies of a predefined pod. -type PodTemplate struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Template defines the pods that will be created from this pod template - // +optional - Template PodTemplateSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplateList is a list of PodTemplates. -type PodTemplateList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []PodTemplate -} - -// ReplicationControllerSpec is the specification of a replication controller. -// As the internal representation of a replication controller, it may have either -// a TemplateRef or a Template set. -type ReplicationControllerSpec struct { - // Replicas is the number of desired replicas. 
- Replicas int32 - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // Selector is a label query over pods that should match the Replicas count. - Selector map[string]string - - // TemplateRef is a reference to an object that describes the pod that will be created if - // insufficient replicas are detected. This reference is ignored if a Template is set. - // Must be set before converting to a versioned API object - // +optional - //TemplateRef *ObjectReference - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Internally, this takes precedence over a - // TemplateRef. - // +optional - Template *PodTemplateSpec -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -type ReplicationControllerStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replication controller. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replication controller. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replication controller's current state. - // +optional - Conditions []ReplicationControllerCondition -} - -type ReplicationControllerConditionType string - -// These are valid conditions of a replication controller. -const ( - // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods - // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, - // etc. or deleted due to kubelet being down or finalizers are failing. - ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" -) - -// ReplicationControllerCondition describes the state of a replication controller at a certain point. -type ReplicationControllerCondition struct { - // Type of replication controller condition. - Type ReplicationControllerConditionType - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationController represents the configuration of a replication controller. 
-type ReplicationController struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this replication controller. - // +optional - Spec ReplicationControllerSpec - - // Status is the current status of this replication controller. This data may be - // out of date by some window of time. - // +optional - Status ReplicationControllerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicationController -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceList holds a list of services. -type ServiceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Service -} - -// Session Affinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. - ServiceAffinityNone ServiceAffinity = "None" -) - -const ( - // DefaultClientIPServiceAffinitySeconds is the default timeout seconds - // of Client IP based session affinity - 3 hours. - DefaultClientIPServiceAffinitySeconds int32 = 10800 - // MaxClientIPServiceAffinitySeconds is the max timeout seconds - // of Client IP based session affinity - 1 day. - MaxClientIPServiceAffinitySeconds int32 = 86400 -) - -// SessionAffinityConfig represents the configurations of session affinity. -type SessionAffinityConfig struct { - // clientIP contains the configurations of Client IP based session affinity. - // +optional - ClientIP *ClientIPConfig -} - -// ClientIPConfig represents the configurations of Client IP based session affinity. -type ClientIPConfig struct { - // timeoutSeconds specifies the seconds of ClientIP type session sticky time. - // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - // Default value is 10800(for 3 hours). - // +optional - TimeoutSeconds *int32 -} - -// Service Type string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the ClusterIP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. - ServiceTypeLoadBalancer ServiceType = "LoadBalancer" - - // ServiceTypeExternalName means a service consists of only a reference to - // an external name that kubedns or equivalent will return as a CNAME - // record, with no exposing or proxying of any pods involved. - ServiceTypeExternalName ServiceType = "ExternalName" -) - -// Service External Traffic Policy Type string -type ServiceExternalTrafficPolicyType string - -const ( - // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. 
- ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" - // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" -) - -// ServiceStatus represents the current status of a service -type ServiceStatus struct { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - // +optional - LoadBalancer LoadBalancerStatus -} - -// LoadBalancerStatus represents the status of a load-balancer -type LoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer; - // traffic intended for the service should be sent to these ingress points. - // +optional - Ingress []LoadBalancerIngress -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. -type LoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - // +optional - IP string - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - // +optional - Hostname string -} - -// ServiceSpec describes the attributes that a user creates on a service -type ServiceSpec struct { - // Type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - // +optional - Type ServiceType - - // Required: The list of ports that are exposed by this service. - Ports []ServicePort - - // Route service traffic to pods with label keys and values matching this - // selector. If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - // Ignored if type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - Selector map[string]string - - // ClusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. 
- // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +optional - ClusterIP string - - // ExternalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. - ExternalName string - - // ExternalIPs are used by external load balancers, or can be set by - // users to handle external traffic that arrives at a node. - // +optional - ExternalIPs []string - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - // +optional - LoadBalancerIP string - - // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. - // +optional - SessionAffinity ServiceAffinity - - // sessionAffinityConfig contains the configurations of session affinity. - // +optional - SessionAffinityConfig *SessionAffinityConfig - - // Optional: If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // +optional - LoadBalancerSourceRanges []string - - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. - // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType - - // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. - // +optional - HealthCheckNodePort int32 - - // publishNotReadyAddresses, when set to true, indicates that DNS implementations - // must publish the notReadyAddresses of subsets for the Endpoints associated with - // the Service. The default value is false. - // The primary use case for setting this field is to use a StatefulSet's Headless Service - // to propagate SRV records for its Pods without respect to their readiness for purpose - // of peer discovery. - // +optional - PublishNotReadyAddresses bool -} - -type ServicePort struct { - // Optional if only one ServicePort is defined on this service: The - // name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - Name string - - // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". - Protocol Protocol - - // The port that will be exposed on the service. - Port int32 - - // Optional: The target port on pods selected by this service. 
If this - // is a string, it will be looked up as a named port in the target - // Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - TargetPort intstr.IntOrString - - // The port on each node on which this service is exposed. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - NodePort int32 -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -type Service struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a service. - // +optional - Spec ServiceSpec - - // Status represents the current status of a service. - // +optional - Status ServiceStatus -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount - Secrets []ObjectReference - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // +optional - ImagePullSecrets []LocalObjectReference - - // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. - // Can be overridden at the pod level. - // +optional - AutomountServiceAccountToken *bool -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ServiceAccount -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // The set of all endpoints is the union of all subsets. - Subsets []EndpointSubset -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. 
-// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - Addresses []EndpointAddress - NotReadyAddresses []EndpointAddress - Ports []EndpointPort -} - -// EndpointAddress is a tuple that describes single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, see #4447. - IP string - // Optional: Hostname of this endpoint - // Meant to be used by DNS servers etc. - // +optional - Hostname string - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - // +optional - NodeName *string - // Optional: The kubernetes object related to the entry point. - TargetRef *ObjectReference -} - -// EndpointPort is a tuple that describes a single port. -type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). Optional - // if only one port is defined. Must be a DNS_LABEL. - Name string - - // The port number. - Port int32 - - // The IP protocol for this port. - Protocol Protocol -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EndpointsList is a list of endpoints. -type EndpointsList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Endpoints -} - -// NodeSpec describes the attributes that a node is created with. -type NodeSpec struct { - // PodCIDR represents the pod IP range assigned to the node - // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. - // +optional - PodCIDR string - - // ID of the node assigned by the cloud provider - // Note: format is "<ProviderName>://<ProviderSpecificNodeID>" - // +optional - ProviderID string - - // Unschedulable controls node schedulability of new pods. By default node is schedulable. - // +optional - Unschedulable bool - - // If specified, the node's taints. - // +optional - Taints []Taint - - // If specified, the source to get node configuration from - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field - // +optional - ConfigSource *NodeConfigSource - - // Deprecated. Not all kubelets will set this field. Remove field after 1.13. - // see: https://issues.k8s.io/61966 - // +optional - DoNotUse_ExternalID string -} - -// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil. -type NodeConfigSource struct { - ConfigMap *ConfigMapNodeConfigSource -} - -type ConfigMapNodeConfigSource struct { - // Namespace is the metadata.namespace of the referenced ConfigMap. - // This field is required in all cases. - Namespace string - - // Name is the metadata.name of the referenced ConfigMap. - // This field is required in all cases. - Name string - - // UID is the metadata.UID of the referenced ConfigMap. - // This field is forbidden in Node.Spec, and required in Node.Status. - // +optional - UID types.UID - - // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. - // This field is forbidden in Node.Spec, and required in Node.Status.
- // +optional - ResourceVersion string - - // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure - // This field is required in all cases. - KubeletConfigKey string -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -type DaemonEndpoint struct { - /* - The port tag was not properly in quotes in earlier releases, so it must be - uppercase for backwards compatibility (since it was falling back to var name of - 'Port'). - */ - - // Port number of the given endpoint. - Port int32 -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -type NodeDaemonEndpoints struct { - // Endpoint on which Kubelet is listening. - // +optional - KubeletEndpoint DaemonEndpoint -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -type NodeSystemInfo struct { - // MachineID reported by the node. For unique machine identification - // in the cluster this field is preferred. Learn more from man(5) - // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html - MachineID string - // SystemUUID reported by the node. For unique machine identification - // MachineID is preferred. This field is specific to Red Hat hosts - // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html - SystemUUID string - // Boot ID reported by the node. - BootID string - // Kernel Version reported by the node. - KernelVersion string - // OS Image reported by the node. - OSImage string - // ContainerRuntime Version reported by the node. - ContainerRuntimeVersion string - // Kubelet Version reported by the node. - KubeletVersion string - // KubeProxy Version reported by the node. - KubeProxyVersion string - // The Operating System reported by the node - OperatingSystem string - // The Architecture reported by the node - Architecture string -} - -// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. -type NodeConfigStatus struct { - // Assigned reports the checkpointed config the node will try to use. - // When Node.Spec.ConfigSource is updated, the node checkpoints the associated - // config payload to local disk, along with a record indicating intended - // config. The node refers to this record to choose its config checkpoint, and - // reports this record in Assigned. Assigned only updates in the status after - // the record has been checkpointed to disk. When the Kubelet is restarted, - // it tries to make the Assigned config the Active config by loading and - // validating the checkpointed payload identified by Assigned. - // +optional - Assigned *NodeConfigSource - // Active reports the checkpointed config the node is actively using. - // Active will represent either the current version of the Assigned config, - // or the current LastKnownGood config, depending on whether attempting to use the - // Assigned config results in an error. - // +optional - Active *NodeConfigSource - // LastKnownGood reports the checkpointed config the node will fall back to - // when it encounters an error attempting to use the Assigned config. - // The Assigned config becomes the LastKnownGood config when the node determines - // that the Assigned config is stable and correct. - // This is currently implemented as a 10-minute soak period starting when the local - // record of Assigned config is updated. If the Assigned config is Active at the end - // of this period, it becomes the LastKnownGood. 
Note that if Spec.ConfigSource is - // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, - // because the local default config is always assumed good. - // You should not make assumptions about the node's method of determining config stability - // and correctness, as this may change or become configurable in the future. - // +optional - LastKnownGood *NodeConfigSource - // Error describes any problems reconciling the Spec.ConfigSource to the Active config. - // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned - // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting - // to load or validate the Assigned config, etc. - // Errors may occur at different points while syncing config. Earlier errors (e.g. download or - // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across - // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in - // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error - // by fixing the config assigned in Spec.ConfigSource. - // You can find additional information for debugging by searching the error message in the Kubelet log. - // Error is a human-readable description of the error state; machines can check whether or not Error - // is empty, but should not rely on the stability of the Error text across Kubelet versions. - // +optional - Error string -} - -// NodeStatus is information about the current status of a node. -type NodeStatus struct { - // Capacity represents the total resources of a node. - // +optional - Capacity ResourceList - // Allocatable represents the resources of a node that are available for scheduling. - // +optional - Allocatable ResourceList - // NodePhase is the current lifecycle phase of the node. - // +optional - Phase NodePhase - // Conditions is an array of current node conditions. - // +optional - Conditions []NodeCondition - // Queried from cloud provider, if available. - // +optional - Addresses []NodeAddress - // Endpoints of daemons running on the Node. - // +optional - DaemonEndpoints NodeDaemonEndpoints - // Set of ids/uuids to uniquely identify the node. - // +optional - NodeInfo NodeSystemInfo - // List of container images on this node - // +optional - Images []ContainerImage - // List of attachable volumes in use (mounted) by the node. - // +optional - VolumesInUse []UniqueVolumeName - // List of volumes that are attached to the node. - // +optional - VolumesAttached []AttachedVolume - // Status of the config assigned to the node via the dynamic Kubelet config feature. - // +optional - Config *NodeConfigStatus -} - -type UniqueVolumeName string - -// AttachedVolume describes a volume attached to a node -type AttachedVolume struct { - // Name of the attached volume - Name UniqueVolumeName - - // DevicePath represents the device path where the volume should be available - DevicePath string -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -type AvoidPods struct { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. 
- // +optional - PreferAvoidPods []PreferAvoidPodsEntry -} - -// Describes a class of pods that should avoid this node. -type PreferAvoidPodsEntry struct { - // The class of pods. - PodSignature PodSignature - // Time at which this entry was added to the list. - // +optional - EvictionTime metav1.Time - // (brief) reason why this entry was added to the list. - // +optional - Reason string - // Human readable message indicating why this entry was added to the list. - // +optional - Message string -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -type PodSignature struct { - // Reference to controller whose pods should avoid this node. - // +optional - PodController *metav1.OwnerReference -} - -// Describe a container image -type ContainerImage struct { - // Names by which this image is known. - Names []string - // The size of the image in bytes. - // +optional - SizeBytes int64 -} - -type NodePhase string - -// These are the valid phases of node. -const ( - // NodePending means the node has been created/added by the system, but not configured. - NodePending NodePhase = "Pending" - // NodeRunning means the node has been configured and has Kubernetes components running. - NodeRunning NodePhase = "Running" - // NodeTerminated means the node has been removed from the cluster. - NodeTerminated NodePhase = "Terminated" -) - -type NodeConditionType string - -// These are valid conditions of node. Currently, we don't have enough information to decide -// node condition. In the future, we will add more. The proposed set of conditions are: -// NodeReady, NodeReachable -const ( - // NodeReady means kubelet is healthy and ready to accept pods. - NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" - // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. - NodeMemoryPressure NodeConditionType = "MemoryPressure" - // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. - NodeDiskPressure NodeConditionType = "DiskPressure" - // NodeNetworkUnavailable means that network for the node is not correctly configured. - NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" -) - -type NodeCondition struct { - Type NodeConditionType - Status ConditionStatus - // +optional - LastHeartbeatTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type NodeAddressType string - -const ( - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" - NodeExternalDNS NodeAddressType = "ExternalDNS" - NodeInternalDNS NodeAddressType = "InternalDNS" -) - -type NodeAddress struct { - Type NodeAddressType - Address string -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - // +optional - Capacity ResourceList -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . 
characters allowed anywhere, except the first or last character. -// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. - ResourceEphemeralStorage ResourceName = "ephemeral-storage" -) - -const ( - // Default namespace prefix. - ResourceDefaultNamespacePrefix = "kubernetes.io/" - // Name prefix for huge page resources (alpha). - ResourceHugePagesPrefix = "hugepages-" - // Name prefix for storage resource limits - ResourceAttachableVolumesPrefix = "attachable-volumes-" -) - -// ResourceList is a set of (resource name, quantity) pairs. -type ResourceList map[ResourceName]resource.Quantity - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Node is a worker node in Kubernetes -// The name of the node according to etcd is in ObjectMeta.Name. -type Node struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a node. - // +optional - Spec NodeSpec - - // Status describes the current status of a Node - // +optional - Status NodeStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeList is a list of nodes. -type NodeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Node -} - -// NamespaceSpec describes the attributes on a Namespace -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage - Finalizers []FinalizerName -} - -// FinalizerName is the name identifying a finalizer during namespace lifecycle. -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or -// in metav1. -const ( - FinalizerKubernetes FinalizerName = "kubernetes" -) - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace. - // +optional - Phase NamespacePhase -} - -type NamespacePhase string - -// These are the valid phases of a namespace. -const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A namespace provides a scope for Names. -// Use of multiple namespaces is optional -type Namespace struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of the Namespace. 
- // +optional - Spec NamespaceSpec - - // Status describes the current status of a Namespace - // +optional - Status NamespaceStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Namespace -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. -type Binding struct { - metav1.TypeMeta - // ObjectMeta describes the object that is being bound. - // +optional - metav1.ObjectMeta - - // Target is the object to bind to. - Target ObjectReference -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - // +optional - UID *types.UID -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodLogOptions is the query options for a Pod's logs REST call -type PodLogOptions struct { - metav1.TypeMeta - - // Container for which to return logs - Container string - // If true, follow the logs for the pod - Follow bool - // If true, return previous terminated container logs - Previous bool - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *metav1.Time - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. - Timestamps bool - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - LimitBytes *int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodAttachOptions is the query options to a Pod's remote attach call -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -type PodAttachOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the attach call - // +optional - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the attach call - // +optional - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the attach call - // +optional - Stderr bool - - // TTY if true indicates that a tty will be allocated for the attach call - // +optional - TTY bool - - // Container to attach to. 
- // +optional - Container string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodExecOptions is the query options to a Pod's remote exec call -type PodExecOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the exec call - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the exec call - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the exec call - Stderr bool - - // TTY if true indicates that a tty will be allocated for the exec call - TTY bool - - // Container in which to execute the command. - Container string - - // Command is the remote command to execute; argv array; not executed within a shell. - Command []string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPortForwardOptions is the query options to a Pod's port forward call -type PodPortForwardOptions struct { - metav1.TypeMeta - - // The list of ports to forward - // +optional - Ports []int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodProxyOptions is the query options to a Pod's proxy call -type PodProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeProxyOptions is the query options to a Node's proxy call -type NodeProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - metav1.TypeMeta - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - Path string -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type ObjectReference struct { - // +optional - Kind string - // +optional - Namespace string - // +optional - Name string - // +optional - UID types.UID - // +optional - APIVersion string - // +optional - ResourceVersion string - - // Optional. If referring to a piece of an object instead of an entire object, this string - // should contain information to identify the sub-object. For example, if the object - // reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - // +optional - FieldPath string -} - -// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. -type LocalObjectReference struct { - //TODO: Add other useful fields. apiVersion, kind, uid? 
- Name string -} - -// TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. -type TypedLocalObjectReference struct { - // APIGroup is the group for the resource being referenced. - // If APIGroup is not specified, the specified Kind must be in the core API group. - // For any other third-party types, APIGroup is required. - // +optional - APIGroup *string - // Kind is the type of resource being referenced - Kind string - // Name is the name of resource being referenced - Name string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type SerializedReference struct { - metav1.TypeMeta - // +optional - Reference ObjectReference -} - -type EventSource struct { - // Component from which the event is generated. - // +optional - Component string - // Node name on which the event is generated. - // +optional - Host string -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. -type Event struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Required. The object that this event is about. Mapped to events.Event.regarding - // +optional - InvolvedObject ObjectReference - - // Optional; this should be a short, machine understandable string that gives the reason - // for this event being generated. For example, if the event is reporting that a container - // can't start, the Reason might be "ImageNotFound". - // TODO: provide exact specification for format. - // +optional - Reason string - - // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. Mapped to events.Event.note - // +optional - Message string - - // Optional. The component reporting this event. Should be a short machine understandable string. - // +optional - Source EventSource - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - // +optional - FirstTimestamp metav1.Time - - // The time at which the most recent occurrence of this event was recorded. - // +optional - LastTimestamp metav1.Time - - // The number of times this event has occurred. - // +optional - Count int32 - - // Type of this event (Normal, Warning), new types could be added in the future. - // +optional - Type string - - // Time when this Event was first observed. - // +optional - EventTime metav1.MicroTime - - // Data about the Event series this event represents or nil if it's a singleton Event. - // +optional - Series *EventSeries - - // What action was taken/failed regarding to the Regarding object. - // +optional - Action string - - // Optional secondary object for more complex actions. - // +optional - Related *ObjectReference - - // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. - // +optional - ReportingController string - - // ID of the controller instance, e.g. `kubelet-xyzf`. 
- // +optional - ReportingInstance string -} - -type EventSeries struct { - // Number of occurrences in this series up to the last heartbeat time - Count int32 - // Time of the last occurrence observed - LastObservedTime metav1.MicroTime - // State of this Series: Ongoing or Finished - State EventSeriesState -} - -type EventSeriesState string - -const ( - EventSeriesStateOngoing EventSeriesState = "Ongoing" - EventSeriesStateFinished EventSeriesState = "Finished" - EventSeriesStateUnknown EventSeriesState = "Unknown" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EventList is a list of events. -type EventList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Event -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// List holds a list of objects, which may not be known by the server. -type List metainternalversion.List - -// A type of object that is limited -type LimitType string - -const ( - // Limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" - // Limit that applies to all persistent volume claims in a namespace - LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind -type LimitRangeItem struct { - // Type of resource that this limit applies to - // +optional - Type LimitType - // Max usage constraints on this kind by resource name - // +optional - Max ResourceList - // Min usage constraints on this kind by resource name - // +optional - Min ResourceList - // Default resource requirement limit value by resource name. - // +optional - Default ResourceList - // DefaultRequest resource requirement request value by resource name. - // +optional - DefaultRequest ResourceList - // MaxLimitRequestRatio represents the max burst value for the named resource - // +optional - MaxLimitRequestRatio ResourceList -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced - Limits []LimitRangeItem -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRange sets resource usage limits for each kind of resource in a Namespace -type LimitRange struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the limits enforced - // +optional - Spec LimitRangeSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRangeList is a list of LimitRange items. 
-type LimitRangeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of LimitRange objects - Items []LimitRange -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // Storage request, in bytes - ResourceRequestsStorage ResourceName = "requests.storage" - // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" - // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" -) - -// The following identify resource prefix for Kubernetes object types -const ( - // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. - ResourceRequestsHugePagesPrefix = "requests.hugepages-" - // Default resource requests prefix - DefaultResourceRequestsPrefix = "requests." -) - -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -const ( - // Match all pod objects where spec.activeDeadlineSeconds - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where !spec.activeDeadlineSeconds - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" - // Match all pod objects that have priority class mentioned - ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota -type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource - // +optional - Hard ResourceList - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. 
- // +optional - Scopes []ResourceQuotaScope - // ScopeSelector is also a collection of filters like Scopes that must match each object tracked by a quota - // but expressed using ScopeSelectorOperator in combination with possible values. - // +optional - ScopeSelector *ScopeSelector -} - -// A scope selector represents the AND of the selectors represented -// by the scoped-resource selector terms. -type ScopeSelector struct { - // A list of scope selector requirements by scope of the resources. - // +optional - MatchExpressions []ScopedResourceSelectorRequirement -} - -// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator -// that relates the scope name and values. -type ScopedResourceSelectorRequirement struct { - // The name of the scope that the selector applies to. - ScopeName ResourceQuotaScope - // Represents a scope's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. - Operator ScopeSelectorOperator - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. - // This array is replaced during a strategic merge patch. - // +optional - Values []string -} - -// A scope selector operator is the set of operators that can be used in -// a scope selector requirement. -type ScopeSelectorOperator string - -const ( - ScopeSelectorOpIn ScopeSelectorOperator = "In" - ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" - ScopeSelectorOpExists ScopeSelectorOperator = "Exists" - ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" -) - -// ResourceQuotaStatus defines the enforced hard limits and observed use -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource - // +optional - Hard ResourceList - // Used is the current observed total usage of the resource in the namespace - // +optional - Used ResourceList -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired quota - // +optional - Spec ResourceQuotaSpec - - // Status defines the actual enforced quota and its current usage - // +optional - Status ResourceQuotaStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuotaList is a list of ResourceQuota items -type ResourceQuotaList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of ResourceQuota objects - Items []ResourceQuota -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the secret data. Each key must consist of alphanumeric - // characters, '-', '_' or '.'. The serialized form of the secret data is a - // base64 encoded string, representing the arbitrary (possibly non-string) - // data value here. - // +optional - Data map[string][]byte - - // Used to facilitate programmatic handling of secret data. 
- // +optional - Type SecretType -} - -const MaxSecretSize = 1 * 1024 * 1024 - -type SecretType string - -const ( - // SecretTypeOpaque is the default; arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json - // - // Required fields: - // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" - - // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets - DockerConfigJsonKey = ".dockerconfigjson" - - // SecretTypeBasicAuth contains data needed for basic authentication. - // - // Required at least one of fields: - // - Secret.Data["username"] - username used for authentication - // - Secret.Data["password"] - password or token needed for authentication - SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" - - // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets - BasicAuthUsernameKey = "username" - // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets - BasicAuthPasswordKey = "password" - - // SecretTypeSSHAuth contains data needed for SSH authentication. 
- // - // Required field: - // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication - SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" - - // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets - SSHAuthPrivateKey = "ssh-privatekey" - - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secret. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" - // SecretTypeBootstrapToken is used during the automated bootstrap process (first - // implemented by kubeadm). It stores tokens that are used to sign well known - // ConfigMaps. They are used for authn. - SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type SecretList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Secret -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMap holds configuration data for components or applications to consume. -type ConfigMap struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the configuration data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // Values with non-UTF-8 byte sequences must use the BinaryData field. - // The keys stored in Data must not overlap with the keys in - // the BinaryData field, this is enforced during validation process. - // +optional - Data map[string]string - - // BinaryData contains the binary data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // BinaryData can contain byte sequences that are not in the UTF-8 range. - // The keys stored in BinaryData must not overlap with the ones in - // the Data field, this is enforced during validation process. - // Using this field will require 1.10+ apiserver and - // kubelet. - // +optional - BinaryData map[string][]byte -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of ConfigMaps. - Items []ConfigMap -} - -// These constants are for remote command execution and port forwarding and are -// used by both the client side and server side components. -// -// This is probably not the ideal place for them, but it didn't seem worth it -// to create pkg/exec and pkg/portforward just to contain a single file with -// constants in it. Suggestions for more appropriate alternatives are -// definitely welcome! 
-const ( - // Enable stdin for remote command execution - ExecStdinParam = "input" - // Enable stdout for remote command execution - ExecStdoutParam = "output" - // Enable stderr for remote command execution - ExecStderrParam = "error" - // Enable TTY for remote command execution - ExecTTYParam = "tty" - // Command to run for remote command execution - ExecCommandParam = "command" - - // Name of header that specifies stream type - StreamType = "streamType" - // Value for streamType header for stdin stream - StreamTypeStdin = "stdin" - // Value for streamType header for stdout stream - StreamTypeStdout = "stdout" - // Value for streamType header for stderr stream - StreamTypeStderr = "stderr" - // Value for streamType header for data stream - StreamTypeData = "data" - // Value for streamType header for error stream - StreamTypeError = "error" - // Value for streamType header for terminal resize stream - StreamTypeResize = "resize" - - // Name of header that specifies the port being forwarded - PortHeader = "port" - // Name of header that specifies a request ID used to associate the error - // and data streams for a single forwarded connection - PortForwardRequestIDHeader = "requestID" -) - -// Type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -type ComponentCondition struct { - Type ComponentConditionType - Status ConditionStatus - // +optional - Message string - // +optional - Error string -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -type ComponentStatus struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // +optional - Conditions []ComponentCondition -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ComponentStatusList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ComponentStatus -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - // +optional - Capabilities *Capabilities - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - // +optional - Privileged *bool - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsUser *int64 - // The GID to run the entrypoint of the container process. - // Uses runtime default if unset. 
- // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsGroup *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // The read-only root filesystem allows you to restrict the locations that an application can write - // files to, ensuring the persistent data can only be written to mounts. - // +optional - ReadOnlyRootFilesystem *bool - // AllowPrivilegeEscalation controls whether a process can gain more - // privileges than its parent process. This bool directly controls if - // the no_new_privs flag will be set on the container process. - // +optional - AllowPrivilegeEscalation *bool - // ProcMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for - // readonly paths and masked paths. - // +optional - ProcMount *ProcMountType -} - -type ProcMountType string - -const ( - // DefaultProcMount uses the container runtime defaults for readonly and masked - // paths for /proc. Most container runtimes mask certain paths in /proc to avoid - // accidental security exposure of special devices or information. - DefaultProcMount ProcMountType = "Default" - - // UnmaskedProcMount bypasses the default masking behavior of the container - // runtime and ensures the newly created /proc the container stays intact with - // no modifications. - UnmaskedProcMount ProcMountType = "Unmasked" -) - -// SELinuxOptions are the labels to be applied to the container. -type SELinuxOptions struct { - // SELinux user label - // +optional - User string - // SELinux role label - // +optional - Role string - // SELinux type label - // +optional - Type string - // SELinux level label. - // +optional - Level string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record -// the global allocation state of the cluster. The schema of Range and Data generic, in that Range -// should be a string representation of the inputs to a range (for instance, for IP allocation it -// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a -// binary range. Consumers should use annotations to record additional information (schema version, -// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation -// of the cluster, thus the object is less strongly typed than most. -type RangeAllocation struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or - // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define - // a start and end unless there is an implicit end. - Range string - // A byte array representing the serialized state of a range allocation. Additional clarifiers on - // the type or format of data should be represented with annotations. 
For IP allocations, this is - // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing - // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). - Data []byte -} - -const ( - // "default-scheduler" is the name of default scheduler. - DefaultSchedulerName = "default-scheduler" - - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. - // When the --hard-pod-affinity-weight scheduler flag is not specified, - // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int32 = 1 -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD deleted file mode 100644 index 79483ac3..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD +++ /dev/null @@ -1,52 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["helpers_test.go"], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper", - deps = [ - "//pkg/apis/core/helper:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/apis/core/v1/helper/qos:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go deleted file mode 100644 index bf6c001b..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go +++ /dev/null @@ -1,502 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helper - -import ( - "encoding/json" - "fmt" - "strings" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/apis/core/helper" -) - -// IsExtendedResourceName returns true if: -// 1. the resource name is not in the default namespace; -// 2. resource name does not have "requests." prefix, -// to avoid confusion with the convention in quota -// 3. it satisfies the rules in IsQualifiedName() after converted into quota resource name -func IsExtendedResourceName(name v1.ResourceName) bool { - if IsNativeResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) { - return false - } - // Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name - nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name)) - if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 { - return false - } - return true -} - -// IsPrefixedNativeResource returns true if the resource name is in the -// *kubernetes.io/ namespace. -func IsPrefixedNativeResource(name v1.ResourceName) bool { - return strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix) -} - -// IsNativeResource returns true if the resource name is in the -// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are -// implicitly in the kubernetes.io/ namespace. -func IsNativeResource(name v1.ResourceName) bool { - return !strings.Contains(string(name), "/") || - IsPrefixedNativeResource(name) -} - -// IsHugePageResourceName returns true if the resource name has the huge page -// resource prefix. -func IsHugePageResourceName(name v1.ResourceName) bool { - return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix) -} - -// HugePageResourceName returns a ResourceName with the canonical hugepage -// prefix prepended for the specified page size. The page size is converted -// to its canonical representation. -func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName { - return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String())) -} - -// HugePageSizeFromResourceName returns the page size for the specified huge page -// resource name. If the specified input is not a valid huge page resource name -// an error is returned. -func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) { - if !IsHugePageResourceName(name) { - return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name) - } - pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix) - return resource.ParseQuantity(pageSize) -} - -// IsOvercommitAllowed returns true if the resource is in the default -// namespace and is not hugepages. 
-func IsOvercommitAllowed(name v1.ResourceName) bool { - return IsNativeResource(name) && - !IsHugePageResourceName(name) -} - -func IsAttachableVolumeResourceName(name v1.ResourceName) bool { - return strings.HasPrefix(string(name), v1.ResourceAttachableVolumesPrefix) -} - -// Extended and Hugepages resources -func IsScalarResourceName(name v1.ResourceName) bool { - return IsExtendedResourceName(name) || IsHugePageResourceName(name) || - IsPrefixedNativeResource(name) || IsAttachableVolumeResourceName(name) -} - -// this function aims to check if the service's ClusterIP is set or not -// the objective is not to perform validation here -func IsServiceIPSet(service *v1.Service) bool { - return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != "" -} - -// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, -// only if they do not already exist -func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddress) { - for _, add := range addAddresses { - exists := false - for _, existing := range *addresses { - if existing.Address == add.Address && existing.Type == add.Type { - exists = true - break - } - } - if !exists { - *addresses = append(*addresses, add) - } - } -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool { - return ingressSliceEqual(l.Ingress, r.Ingress) -} - -func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if !ingressEqual(&lhs[i], &rhs[i]) { - return false - } - } - return true -} - -func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { - if lhs.IP != rhs.IP { - return false - } - if lhs.Hostname != rhs.Hostname { - return false - } - return true -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusDeepCopy(lb *v1.LoadBalancerStatus) *v1.LoadBalancerStatus { - c := &v1.LoadBalancerStatus{} - c.Ingress = make([]v1.LoadBalancerIngress, len(lb.Ingress)) - for i := range lb.Ingress { - c.Ingress[i] = lb.Ingress[i] - } - return c -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX. 
-func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if containsAccessMode(modes, v1.ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if containsAccessMode(modes, v1.ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if containsAccessMode(modes, v1.ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - return strings.Join(modesStr, ",") -} - -// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode { - strmodes := strings.Split(modes, ",") - accessModes := []v1.PersistentVolumeAccessMode{} - for _, s := range strmodes { - s = strings.Trim(s, " ") - switch { - case s == "RWO": - accessModes = append(accessModes, v1.ReadWriteOnce) - case s == "ROX": - accessModes = append(accessModes, v1.ReadOnlyMany) - case s == "RWX": - accessModes = append(accessModes, v1.ReadWriteMany) - } - } - return accessModes -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { - accessModes := []v1.PersistentVolumeAccessMode{} - for _, m := range modes { - if !containsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements -// labels.Selector. -func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) { - if len(nsm) == 0 { - return labels.Nothing(), nil - } - selector := labels.NewSelector() - for _, expr := range nsm { - var op selection.Operator - switch expr.Operator { - case v1.NodeSelectorOpIn: - op = selection.In - case v1.NodeSelectorOpNotIn: - op = selection.NotIn - case v1.NodeSelectorOpExists: - op = selection.Exists - case v1.NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case v1.NodeSelectorOpGt: - op = selection.GreaterThan - case v1.NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, expr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements -// fields.Selector. 
-func NodeSelectorRequirementsAsFieldSelector(nsm []v1.NodeSelectorRequirement) (fields.Selector, error) { - if len(nsm) == 0 { - return fields.Nothing(), nil - } - - selectors := []fields.Selector{} - for _, expr := range nsm { - switch expr.Operator { - case v1.NodeSelectorOpIn: - if len(expr.Values) != 1 { - return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q", - len(expr.Values), expr.Operator) - } - selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0])) - - case v1.NodeSelectorOpNotIn: - if len(expr.Values) != 1 { - return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q", - len(expr.Values), expr.Operator) - } - selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0])) - - default: - return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator) - } - } - - return fields.AndSelectors(selectors...), nil -} - -// NodeSelectorRequirementKeysExistInNodeSelectorTerms checks if a NodeSelectorTerm with key is already specified in terms -func NodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []v1.NodeSelectorRequirement, terms []v1.NodeSelectorTerm) bool { - for _, req := range reqs { - for _, term := range terms { - for _, r := range term.MatchExpressions { - if r.Key == req.Key { - return true - } - } - } - } - return false -} - -// MatchNodeSelectorTerms checks whether the node labels and fields match node selector terms in ORed; -// nil or empty term matches no objects. -func MatchNodeSelectorTerms( - nodeSelectorTerms []v1.NodeSelectorTerm, - nodeLabels labels.Set, - nodeFields fields.Set, -) bool { - for _, req := range nodeSelectorTerms { - // nil or empty term selects no objects - if len(req.MatchExpressions) == 0 && len(req.MatchFields) == 0 { - continue - } - - if len(req.MatchExpressions) != 0 { - labelSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions) - if err != nil || !labelSelector.Matches(nodeLabels) { - continue - } - } - - if len(req.MatchFields) != 0 { - fieldSelector, err := NodeSelectorRequirementsAsFieldSelector(req.MatchFields) - if err != nil || !fieldSelector.Matches(nodeFields) { - continue - } - } - - return true - } - - return false -} - -// TopologySelectorRequirementsAsSelector converts the []TopologySelectorLabelRequirement api type into a struct -// that implements labels.Selector. -func TopologySelectorRequirementsAsSelector(tsm []v1.TopologySelectorLabelRequirement) (labels.Selector, error) { - if len(tsm) == 0 { - return labels.Nothing(), nil - } - - selector := labels.NewSelector() - for _, expr := range tsm { - r, err := labels.NewRequirement(expr.Key, selection.In, expr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - - return selector, nil -} - -// MatchTopologySelectorTerms checks whether given labels match topology selector terms in ORed; -// nil or empty term matches no objects; while empty term list matches all objects. 
-func MatchTopologySelectorTerms(topologySelectorTerms []v1.TopologySelectorTerm, lbls labels.Set) bool { - if len(topologySelectorTerms) == 0 { - // empty term list matches all objects - return true - } - - for _, req := range topologySelectorTerms { - // nil or empty term selects no objects - if len(req.MatchLabelExpressions) == 0 { - continue - } - - labelSelector, err := TopologySelectorRequirementsAsSelector(req.MatchLabelExpressions) - if err != nil || !labelSelector.Matches(lbls) { - continue - } - - return true - } - - return false -} - -// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec. -// Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool { - podTolerations := spec.Tolerations - - var newTolerations []v1.Toleration - updated := false - for i := range podTolerations { - if toleration.MatchToleration(&podTolerations[i]) { - if helper.Semantic.DeepEqual(toleration, podTolerations[i]) { - return false - } - newTolerations = append(newTolerations, *toleration) - updated = true - continue - } - - newTolerations = append(newTolerations, podTolerations[i]) - } - - if !updated { - newTolerations = append(newTolerations, *toleration) - } - - spec.Tolerations = newTolerations - return true -} - -// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. -// Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool { - return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration) -} - -// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. -func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool { - for i := range tolerations { - if tolerations[i].ToleratesTaint(taint) { - return true - } - } - return false -} - -type taintsFilterFunc func(*v1.Taint) bool - -// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates -// all the taints that apply to the filter in given taint list. -func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool { - if len(taints) == 0 { - return true - } - - for i := range taints { - if applyFilter != nil && !applyFilter(&taints[i]) { - continue - } - - if !TolerationsTolerateTaint(tolerations, &taints[i]) { - return false - } - } - - return true -} - -// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. 
-func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) { - if len(taints) == 0 { - return true, []v1.Toleration{} - } - if len(tolerations) == 0 && len(taints) > 0 { - return false, []v1.Toleration{} - } - result := []v1.Toleration{} - for i := range taints { - tolerated := false - for j := range tolerations { - if tolerations[j].ToleratesTaint(&taints[i]) { - result = append(result, tolerations[j]) - tolerated = true - break - } - } - if !tolerated { - return false, []v1.Toleration{} - } - } - return true, result -} - -func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) { - var avoidPods v1.AvoidPods - if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[v1.PreferAvoidPodsAnnotationKey]), &avoidPods) - if err != nil { - return avoidPods, err - } - } - return avoidPods, nil -} - -// GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { - // Use beta annotation first - if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { - return class - } - - return volume.Spec.StorageClassName -} - -// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - return "" -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go deleted file mode 100644 index 26fe6fa0..00000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go +++ /dev/null @@ -1,5389 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package core - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { - if in == nil { - return nil - } - out := new(AWSElasticBlockStoreVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Affinity) DeepCopyInto(out *Affinity) { - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(NodeAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(PodAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(PodAntiAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. -func (in *Affinity) DeepCopy() *Affinity { - if in == nil { - return nil - } - out := new(Affinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. -func (in *AttachedVolume) DeepCopy() *AttachedVolume { - if in == nil { - return nil - } - out := new(AttachedVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { - *out = *in - if in.PreferAvoidPods != nil { - in, out := &in.PreferAvoidPods, &out.PreferAvoidPods - *out = make([]PreferAvoidPodsEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. -func (in *AvoidPods) DeepCopy() *AvoidPods { - if in == nil { - return nil - } - out := new(AvoidPods) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { - *out = *in - if in.CachingMode != nil { - in, out := &in.CachingMode, &out.CachingMode - *out = new(AzureDataDiskCachingMode) - **out = **in - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - *out = new(AzureDataDiskKind) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. -func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { - if in == nil { - return nil - } - out := new(AzureDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { - *out = *in - if in.SecretNamespace != nil { - in, out := &in.SecretNamespace, &out.SecretNamespace - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. 
-func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { - if in == nil { - return nil - } - out := new(AzureFilePersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. -func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { - if in == nil { - return nil - } - out := new(AzureFileVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Binding) DeepCopyInto(out *Binding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Target = in.Target - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. -func (in *Binding) DeepCopy() *Binding { - if in == nil { - return nil - } - out := new(Binding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Binding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { - *out = *in - if in.VolumeAttributes != nil { - in, out := &in.VolumeAttributes, &out.VolumeAttributes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ControllerPublishSecretRef != nil { - in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef - *out = new(SecretReference) - **out = **in - } - if in.NodeStageSecretRef != nil { - in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef - *out = new(SecretReference) - **out = **in - } - if in.NodePublishSecretRef != nil { - in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. -func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CSIPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Capabilities) DeepCopyInto(out *Capabilities) { - *out = *in - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. -func (in *Capabilities) DeepCopy() *Capabilities { - if in == nil { - return nil - } - out := new(Capabilities) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. -func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CephFSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. -func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { - if in == nil { - return nil - } - out := new(CephFSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource. -func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CinderPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. -func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { - if in == nil { - return nil - } - out := new(CinderVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { - *out = *in - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. -func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { - if in == nil { - return nil - } - out := new(ClientIPConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. -func (in *ComponentCondition) DeepCopy() *ComponentCondition { - if in == nil { - return nil - } - out := new(ComponentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. -func (in *ComponentStatus) DeepCopy() *ComponentStatus { - if in == nil { - return nil - } - out := new(ComponentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComponentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. -func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { - if in == nil { - return nil - } - out := new(ComponentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatusList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.BinaryData != nil { - in, out := &in.BinaryData, &out.BinaryData - *out = make(map[string][]byte, len(*in)) - for key, val := range *in { - var outVal []byte - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]byte, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. -func (in *ConfigMap) DeepCopy() *ConfigMap { - if in == nil { - return nil - } - out := new(ConfigMap) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ConfigMap) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. -func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { - if in == nil { - return nil - } - out := new(ConfigMapEnvSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. -func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { - if in == nil { - return nil - } - out := new(ConfigMapKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ConfigMap, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. -func (in *ConfigMapList) DeepCopy() *ConfigMapList { - if in == nil { - return nil - } - out := new(ConfigMapList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigMapList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource. -func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource { - if in == nil { - return nil - } - out := new(ConfigMapNodeConfigSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. 
-func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { - if in == nil { - return nil - } - out := new(ConfigMapProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. -func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { - if in == nil { - return nil - } - out := new(ConfigMapVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Container) DeepCopyInto(out *Container) { - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ContainerPort, len(*in)) - copy(*out, *in) - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeDevices != nil { - in, out := &in.VolumeDevices, &out.VolumeDevices - *out = make([]VolumeDevice, len(*in)) - copy(*out, *in) - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - if in.Lifecycle != nil { - in, out := &in.Lifecycle, &out.Lifecycle - *out = new(Lifecycle) - (*in).DeepCopyInto(*out) - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(SecurityContext) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. -func (in *Container) DeepCopy() *Container { - if in == nil { - return nil - } - out := new(Container) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { - *out = *in - if in.Names != nil { - in, out := &in.Names, &out.Names - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. -func (in *ContainerImage) DeepCopy() *ContainerImage { - if in == nil { - return nil - } - out := new(ContainerImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. -func (in *ContainerPort) DeepCopy() *ContainerPort { - if in == nil { - return nil - } - out := new(ContainerPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerState) DeepCopyInto(out *ContainerState) { - *out = *in - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - *out = new(ContainerStateWaiting) - **out = **in - } - if in.Running != nil { - in, out := &in.Running, &out.Running - *out = new(ContainerStateRunning) - (*in).DeepCopyInto(*out) - } - if in.Terminated != nil { - in, out := &in.Terminated, &out.Terminated - *out = new(ContainerStateTerminated) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. -func (in *ContainerState) DeepCopy() *ContainerState { - if in == nil { - return nil - } - out := new(ContainerState) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. -func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { - if in == nil { - return nil - } - out := new(ContainerStateRunning) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.FinishedAt.DeepCopyInto(&out.FinishedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. -func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { - if in == nil { - return nil - } - out := new(ContainerStateTerminated) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. 
-func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { - if in == nil { - return nil - } - out := new(ContainerStateWaiting) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { - *out = *in - in.State.DeepCopyInto(&out.State) - in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. -func (in *ContainerStatus) DeepCopy() *ContainerStatus { - if in == nil { - return nil - } - out := new(ContainerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. -func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { - if in == nil { - return nil - } - out := new(DaemonEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. -func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { - if in == nil { - return nil - } - out := new(DownwardAPIProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - **out = **in - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. -func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { - if in == nil { - return nil - } - out := new(DownwardAPIVolumeFile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. 
-func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { - if in == nil { - return nil - } - out := new(DownwardAPIVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { - *out = *in - if in.SizeLimit != nil { - in, out := &in.SizeLimit, &out.SizeLimit - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. -func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { - if in == nil { - return nil - } - out := new(EmptyDirVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { - *out = *in - if in.NodeName != nil { - in, out := &in.NodeName, &out.NodeName - *out = new(string) - **out = **in - } - if in.TargetRef != nil { - in, out := &in.TargetRef, &out.TargetRef - *out = new(ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. -func (in *EndpointAddress) DeepCopy() *EndpointAddress { - if in == nil { - return nil - } - out := new(EndpointAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. -func (in *EndpointPort) DeepCopy() *EndpointPort { - if in == nil { - return nil - } - out := new(EndpointPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { - *out = *in - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.NotReadyAddresses != nil { - in, out := &in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]EndpointPort, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. -func (in *EndpointSubset) DeepCopy() *EndpointSubset { - if in == nil { - return nil - } - out := new(EndpointSubset) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Endpoints) DeepCopyInto(out *Endpoints) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subsets != nil { - in, out := &in.Subsets, &out.Subsets - *out = make([]EndpointSubset, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. 
-func (in *Endpoints) DeepCopy() *Endpoints { - if in == nil { - return nil - } - out := new(Endpoints) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Endpoints) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Endpoints, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. -func (in *EndpointsList) DeepCopy() *EndpointsList { - if in == nil { - return nil - } - out := new(EndpointsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EndpointsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { - *out = *in - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - *out = new(ConfigMapEnvSource) - (*in).DeepCopyInto(*out) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretEnvSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. -func (in *EnvFromSource) DeepCopy() *EnvFromSource { - if in == nil { - return nil - } - out := new(EnvFromSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVar) DeepCopyInto(out *EnvVar) { - *out = *in - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(EnvVarSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. -func (in *EnvVar) DeepCopy() *EnvVar { - if in == nil { - return nil - } - out := new(EnvVar) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - **out = **in - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(ConfigMapKeySelector) - (*in).DeepCopyInto(*out) - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - *out = new(SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. 
-func (in *EnvVarSource) DeepCopy() *EnvVarSource { - if in == nil { - return nil - } - out := new(EnvVarSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Event) DeepCopyInto(out *Event) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.InvolvedObject = in.InvolvedObject - out.Source = in.Source - in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) - in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) - in.EventTime.DeepCopyInto(&out.EventTime) - if in.Series != nil { - in, out := &in.Series, &out.Series - *out = new(EventSeries) - (*in).DeepCopyInto(*out) - } - if in.Related != nil { - in, out := &in.Related, &out.Related - *out = new(ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. -func (in *Event) DeepCopy() *Event { - if in == nil { - return nil - } - out := new(Event) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Event) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EventList) DeepCopyInto(out *EventList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Event, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. -func (in *EventList) DeepCopy() *EventList { - if in == nil { - return nil - } - out := new(EventList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EventList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EventSeries) DeepCopyInto(out *EventSeries) { - *out = *in - in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. -func (in *EventSeries) DeepCopy() *EventSeries { - if in == nil { - return nil - } - out := new(EventSeries) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EventSource) DeepCopyInto(out *EventSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. -func (in *EventSource) DeepCopy() *EventSource { - if in == nil { - return nil - } - out := new(EventSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExecAction) DeepCopyInto(out *ExecAction) { - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. -func (in *ExecAction) DeepCopy() *ExecAction { - if in == nil { - return nil - } - out := new(ExecAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { - *out = *in - if in.TargetWWNs != nil { - in, out := &in.TargetWWNs, &out.TargetWWNs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Lun != nil { - in, out := &in.Lun, &out.Lun - *out = new(int32) - **out = **in - } - if in.WWIDs != nil { - in, out := &in.WWIDs, &out.WWIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. -func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { - if in == nil { - return nil - } - out := new(FCVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource. -func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource { - if in == nil { - return nil - } - out := new(FlexPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. -func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { - if in == nil { - return nil - } - out := new(FlexVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. -func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { - if in == nil { - return nil - } - out := new(FlockerVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. -func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { - if in == nil { - return nil - } - out := new(GCEPersistentDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. -func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { - if in == nil { - return nil - } - out := new(GitRepoVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. -func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { - if in == nil { - return nil - } - out := new(GlusterfsVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { - *out = *in - out.Port = in.Port - if in.HTTPHeaders != nil { - in, out := &in.HTTPHeaders, &out.HTTPHeaders - *out = make([]HTTPHeader, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. -func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { - if in == nil { - return nil - } - out := new(HTTPGetAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. -func (in *HTTPHeader) DeepCopy() *HTTPHeader { - if in == nil { - return nil - } - out := new(HTTPHeader) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Handler) DeepCopyInto(out *Handler) { - *out = *in - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(ExecAction) - (*in).DeepCopyInto(*out) - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - *out = new(HTTPGetAction) - (*in).DeepCopyInto(*out) - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - *out = new(TCPSocketAction) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. -func (in *Handler) DeepCopy() *Handler { - if in == nil { - return nil - } - out := new(Handler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HostAlias) DeepCopyInto(out *HostAlias) { - *out = *in - if in.Hostnames != nil { - in, out := &in.Hostnames, &out.Hostnames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. -func (in *HostAlias) DeepCopy() *HostAlias { - if in == nil { - return nil - } - out := new(HostAlias) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { - *out = *in - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(HostPathType) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. -func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { - if in == nil { - return nil - } - out := new(HostPathVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { - *out = *in - if in.Portals != nil { - in, out := &in.Portals, &out.Portals - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - if in.InitiatorName != nil { - in, out := &in.InitiatorName, &out.InitiatorName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. -func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { - if in == nil { - return nil - } - out := new(ISCSIPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { - *out = *in - if in.Portals != nil { - in, out := &in.Portals, &out.Portals - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - if in.InitiatorName != nil { - in, out := &in.InitiatorName, &out.InitiatorName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. -func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { - if in == nil { - return nil - } - out := new(ISCSIVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { - *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. -func (in *KeyToPath) DeepCopy() *KeyToPath { - if in == nil { - return nil - } - out := new(KeyToPath) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { - *out = *in - if in.PostStart != nil { - in, out := &in.PostStart, &out.PostStart - *out = new(Handler) - (*in).DeepCopyInto(*out) - } - if in.PreStop != nil { - in, out := &in.PreStop, &out.PreStop - *out = new(Handler) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. -func (in *Lifecycle) DeepCopy() *Lifecycle { - if in == nil { - return nil - } - out := new(Lifecycle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRange) DeepCopyInto(out *LimitRange) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. -func (in *LimitRange) DeepCopy() *LimitRange { - if in == nil { - return nil - } - out := new(LimitRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LimitRange) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { - *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.DefaultRequest != nil { - in, out := &in.DefaultRequest, &out.DefaultRequest - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.MaxLimitRequestRatio != nil { - in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. -func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { - if in == nil { - return nil - } - out := new(LimitRangeItem) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]LimitRange, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. -func (in *LimitRangeList) DeepCopy() *LimitRangeList { - if in == nil { - return nil - } - out := new(LimitRangeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *LimitRangeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make([]LimitRangeItem, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. -func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { - if in == nil { - return nil - } - out := new(LimitRangeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *List) DeepCopyInto(out *List) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if (*in)[i] != nil { - (*out)[i] = (*in)[i].DeepCopyObject() - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. -func (in *List) DeepCopy() *List { - if in == nil { - return nil - } - out := new(List) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *List) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress. -func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress { - if in == nil { - return nil - } - out := new(LoadBalancerIngress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { - *out = *in - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]LoadBalancerIngress, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus. -func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus { - if in == nil { - return nil - } - out := new(LoadBalancerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. -func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { - if in == nil { - return nil - } - out := new(LocalObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) { - *out = *in - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource. -func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { - if in == nil { - return nil - } - out := new(LocalVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource. -func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource { - if in == nil { - return nil - } - out := new(NFSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Namespace) DeepCopyInto(out *Namespace) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace. -func (in *Namespace) DeepCopy() *Namespace { - if in == nil { - return nil - } - out := new(Namespace) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Namespace) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Namespace, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList. -func (in *NamespaceList) DeepCopy() *NamespaceList { - if in == nil { - return nil - } - out := new(NamespaceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NamespaceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) { - *out = *in - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]FinalizerName, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec. -func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { - if in == nil { - return nil - } - out := new(NamespaceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus. -func (in *NamespaceStatus) DeepCopy() *NamespaceStatus { - if in == nil { - return nil - } - out := new(NamespaceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Node) DeepCopyInto(out *Node) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. -func (in *Node) DeepCopy() *Node { - if in == nil { - return nil - } - out := new(Node) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Node) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress. -func (in *NodeAddress) DeepCopy() *NodeAddress { - if in == nil { - return nil - } - out := new(NodeAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = new(NodeSelector) - (*in).DeepCopyInto(*out) - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]PreferredSchedulingTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity. -func (in *NodeAffinity) DeepCopy() *NodeAffinity { - if in == nil { - return nil - } - out := new(NodeAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeCondition) DeepCopyInto(out *NodeCondition) { - *out = *in - in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition. -func (in *NodeCondition) DeepCopy() *NodeCondition { - if in == nil { - return nil - } - out := new(NodeCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) { - *out = *in - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapNodeConfigSource) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource. -func (in *NodeConfigSource) DeepCopy() *NodeConfigSource { - if in == nil { - return nil - } - out := new(NodeConfigSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) { - *out = *in - if in.Assigned != nil { - in, out := &in.Assigned, &out.Assigned - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } - if in.LastKnownGood != nil { - in, out := &in.LastKnownGood, &out.LastKnownGood - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus. -func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus { - if in == nil { - return nil - } - out := new(NodeConfigStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) { - *out = *in - out.KubeletEndpoint = in.KubeletEndpoint - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints. -func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { - if in == nil { - return nil - } - out := new(NodeDaemonEndpoints) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeList) DeepCopyInto(out *NodeList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Node, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. -func (in *NodeList) DeepCopy() *NodeList { - if in == nil { - return nil - } - out := new(NodeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions. -func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions { - if in == nil { - return nil - } - out := new(NodeProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResources) DeepCopyInto(out *NodeResources) { - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. -func (in *NodeResources) DeepCopy() *NodeResources { - if in == nil { - return nil - } - out := new(NodeResources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { - *out = *in - if in.NodeSelectorTerms != nil { - in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms - *out = make([]NodeSelectorTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. -func (in *NodeSelector) DeepCopy() *NodeSelector { - if in == nil { - return nil - } - out := new(NodeSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement. -func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement { - if in == nil { - return nil - } - out := new(NodeSelectorRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) { - *out = *in - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]NodeSelectorRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.MatchFields != nil { - in, out := &in.MatchFields, &out.MatchFields - *out = make([]NodeSelectorRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm. -func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { - if in == nil { - return nil - } - out := new(NodeSelectorTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { - *out = *in - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ConfigSource != nil { - in, out := &in.ConfigSource, &out.ConfigSource - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. -func (in *NodeSpec) DeepCopy() *NodeSpec { - if in == nil { - return nil - } - out := new(NodeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Allocatable != nil { - in, out := &in.Allocatable, &out.Allocatable - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]NodeCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]NodeAddress, len(*in)) - copy(*out, *in) - } - out.DaemonEndpoints = in.DaemonEndpoints - out.NodeInfo = in.NodeInfo - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]ContainerImage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumesInUse != nil { - in, out := &in.VolumesInUse, &out.VolumesInUse - *out = make([]UniqueVolumeName, len(*in)) - copy(*out, *in) - } - if in.VolumesAttached != nil { - in, out := &in.VolumesAttached, &out.VolumesAttached - *out = make([]AttachedVolume, len(*in)) - copy(*out, *in) - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(NodeConfigStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. -func (in *NodeStatus) DeepCopy() *NodeStatus { - if in == nil { - return nil - } - out := new(NodeStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo. -func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo { - if in == nil { - return nil - } - out := new(NodeSystemInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector. -func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector { - if in == nil { - return nil - } - out := new(ObjectFieldSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. -func (in *ObjectReference) DeepCopy() *ObjectReference { - if in == nil { - return nil - } - out := new(ObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ObjectReference) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume. -func (in *PersistentVolume) DeepCopy() *PersistentVolume { - if in == nil { - return nil - } - out := new(PersistentVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolume) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim. -func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { - if in == nil { - return nil - } - out := new(PersistentVolumeClaim) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition. -func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList. -func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) { - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Resources.DeepCopyInto(&out.Resources) - if in.StorageClassName != nil { - in, out := &in.StorageClassName, &out.StorageClassName - *out = new(string) - **out = **in - } - if in.VolumeMode != nil { - in, out := &in.VolumeMode, &out.VolumeMode - *out = new(PersistentVolumeMode) - **out = **in - } - if in.DataSource != nil { - in, out := &in.DataSource, &out.DataSource - *out = new(TypedLocalObjectReference) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec. -func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) { - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PersistentVolumeClaimCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus. -func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
-func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeClaimVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.ListMeta = in.ListMeta
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]PersistentVolume, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
-func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
-	*out = *in
-	if in.GCEPersistentDisk != nil {
-		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
-		*out = new(GCEPersistentDiskVolumeSource)
-		**out = **in
-	}
-	if in.AWSElasticBlockStore != nil {
-		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
-		*out = new(AWSElasticBlockStoreVolumeSource)
-		**out = **in
-	}
-	if in.HostPath != nil {
-		in, out := &in.HostPath, &out.HostPath
-		*out = new(HostPathVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Glusterfs != nil {
-		in, out := &in.Glusterfs, &out.Glusterfs
-		*out = new(GlusterfsVolumeSource)
-		**out = **in
-	}
-	if in.NFS != nil {
-		in, out := &in.NFS, &out.NFS
-		*out = new(NFSVolumeSource)
-		**out = **in
-	}
-	if in.RBD != nil {
-		in, out := &in.RBD, &out.RBD
-		*out = new(RBDPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Quobyte != nil {
-		in, out := &in.Quobyte, &out.Quobyte
-		*out = new(QuobyteVolumeSource)
-		**out = **in
-	}
-	if in.ISCSI != nil {
-		in, out := &in.ISCSI, &out.ISCSI
-		*out = new(ISCSIPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.FlexVolume != nil {
-		in, out := &in.FlexVolume, &out.FlexVolume
-		*out = new(FlexPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Cinder != nil {
-		in, out := &in.Cinder, &out.Cinder
-		*out = new(CinderPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.CephFS != nil {
-		in, out := &in.CephFS, &out.CephFS
-		*out = new(CephFSPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.FC != nil {
-		in, out := &in.FC, &out.FC
-		*out = new(FCVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Flocker != nil {
-		in, out := &in.Flocker, &out.Flocker
-		*out = new(FlockerVolumeSource)
-		**out = **in
-	}
-	if in.AzureFile != nil {
-		in, out := &in.AzureFile, &out.AzureFile
-		*out = new(AzureFilePersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.VsphereVolume != nil {
-		in, out := &in.VsphereVolume, &out.VsphereVolume
-		*out = new(VsphereVirtualDiskVolumeSource)
-		**out = **in
-	}
-	if in.AzureDisk != nil {
-		in, out := &in.AzureDisk, &out.AzureDisk
-		*out = new(AzureDiskVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.PhotonPersistentDisk != nil {
-		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
-		*out = new(PhotonPersistentDiskVolumeSource)
-		**out = **in
-	}
-	if in.PortworxVolume != nil {
-		in, out := &in.PortworxVolume, &out.PortworxVolume
-		*out = new(PortworxVolumeSource)
-		**out = **in
-	}
-	if in.ScaleIO != nil {
-		in, out := &in.ScaleIO, &out.ScaleIO
-		*out = new(ScaleIOPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Local != nil {
-		in, out := &in.Local, &out.Local
-		*out = new(LocalVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.StorageOS != nil {
-		in, out := &in.StorageOS, &out.StorageOS
-		*out = new(StorageOSPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.CSI != nil {
-		in, out := &in.CSI, &out.CSI
-		*out = new(CSIPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
-func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
-	*out = *in
-	if in.Capacity != nil {
-		in, out := &in.Capacity, &out.Capacity
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
-	if in.AccessModes != nil {
-		in, out := &in.AccessModes, &out.AccessModes
-		*out = make([]PersistentVolumeAccessMode, len(*in))
-		copy(*out, *in)
-	}
-	if in.ClaimRef != nil {
-		in, out := &in.ClaimRef, &out.ClaimRef
-		*out = new(ObjectReference)
-		**out = **in
-	}
-	if in.MountOptions != nil {
-		in, out := &in.MountOptions, &out.MountOptions
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.VolumeMode != nil {
-		in, out := &in.VolumeMode, &out.VolumeMode
-		*out = new(PersistentVolumeMode)
-		**out = **in
-	}
-	if in.NodeAffinity != nil {
-		in, out := &in.NodeAffinity, &out.NodeAffinity
-		*out = new(VolumeNodeAffinity)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
-func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
-func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource. -func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource { - if in == nil { - return nil - } - out := new(PhotonPersistentDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Pod) DeepCopyInto(out *Pod) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. -func (in *Pod) DeepCopy() *Pod { - if in == nil { - return nil - } - out := new(Pod) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Pod) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAffinity) DeepCopyInto(out *PodAffinity) { - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity. -func (in *PodAffinity) DeepCopy() *PodAffinity { - if in == nil { - return nil - } - out := new(PodAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.Namespaces != nil { - in, out := &in.Namespaces, &out.Namespaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm. -func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm { - if in == nil { - return nil - } - out := new(PodAffinityTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) { - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity. -func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity { - if in == nil { - return nil - } - out := new(PodAntiAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions. -func (in *PodAttachOptions) DeepCopy() *PodAttachOptions { - if in == nil { - return nil - } - out := new(PodAttachOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodAttachOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodCondition) DeepCopyInto(out *PodCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. -func (in *PodCondition) DeepCopy() *PodCondition { - if in == nil { - return nil - } - out := new(PodCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { - *out = *in - if in.Nameservers != nil { - in, out := &in.Nameservers, &out.Nameservers - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Searches != nil { - in, out := &in.Searches, &out.Searches - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make([]PodDNSConfigOption, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. -func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { - if in == nil { - return nil - } - out := new(PodDNSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. -func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { - if in == nil { - return nil - } - out := new(PodDNSConfigOption) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions. -func (in *PodExecOptions) DeepCopy() *PodExecOptions { - if in == nil { - return nil - } - out := new(PodExecOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodExecOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodList) DeepCopyInto(out *PodList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. -func (in *PodList) DeepCopy() *PodList { - if in == nil { - return nil - } - out := new(PodList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.SinceSeconds != nil { - in, out := &in.SinceSeconds, &out.SinceSeconds - *out = new(int64) - **out = **in - } - if in.SinceTime != nil { - in, out := &in.SinceTime, &out.SinceTime - *out = (*in).DeepCopy() - } - if in.TailLines != nil { - in, out := &in.TailLines, &out.TailLines - *out = new(int64) - **out = **in - } - if in.LimitBytes != nil { - in, out := &in.LimitBytes, &out.LimitBytes - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions. -func (in *PodLogOptions) DeepCopy() *PodLogOptions { - if in == nil { - return nil - } - out := new(PodLogOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodLogOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]int32, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions. -func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions { - if in == nil { - return nil - } - out := new(PodPortForwardOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions. -func (in *PodProxyOptions) DeepCopy() *PodProxyOptions { - if in == nil { - return nil - } - out := new(PodProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate. -func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { - if in == nil { - return nil - } - out := new(PodReadinessGate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { - *out = *in - if in.ShareProcessNamespace != nil { - in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace - *out = new(bool) - **out = **in - } - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.SupplementalGroups != nil { - in, out := &in.SupplementalGroups, &out.SupplementalGroups - *out = make([]int64, len(*in)) - copy(*out, *in) - } - if in.FSGroup != nil { - in, out := &in.FSGroup, &out.FSGroup - *out = new(int64) - **out = **in - } - if in.Sysctls != nil { - in, out := &in.Sysctls, &out.Sysctls - *out = make([]Sysctl, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext. 
-func (in *PodSecurityContext) DeepCopy() *PodSecurityContext {
-	if in == nil {
-		return nil
-	}
-	out := new(PodSecurityContext)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSignature) DeepCopyInto(out *PodSignature) {
-	*out = *in
-	if in.PodController != nil {
-		in, out := &in.PodController, &out.PodController
-		*out = new(v1.OwnerReference)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature.
-func (in *PodSignature) DeepCopy() *PodSignature {
-	if in == nil {
-		return nil
-	}
-	out := new(PodSignature)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSpec) DeepCopyInto(out *PodSpec) {
-	*out = *in
-	if in.Volumes != nil {
-		in, out := &in.Volumes, &out.Volumes
-		*out = make([]Volume, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.InitContainers != nil {
-		in, out := &in.InitContainers, &out.InitContainers
-		*out = make([]Container, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Containers != nil {
-		in, out := &in.Containers, &out.Containers
-		*out = make([]Container, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.TerminationGracePeriodSeconds != nil {
-		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	if in.ActiveDeadlineSeconds != nil {
-		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.AutomountServiceAccountToken != nil {
-		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
-		*out = new(bool)
-		**out = **in
-	}
-	if in.SecurityContext != nil {
-		in, out := &in.SecurityContext, &out.SecurityContext
-		*out = new(PodSecurityContext)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ImagePullSecrets != nil {
-		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
-		*out = make([]LocalObjectReference, len(*in))
-		copy(*out, *in)
-	}
-	if in.Affinity != nil {
-		in, out := &in.Affinity, &out.Affinity
-		*out = new(Affinity)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.HostAliases != nil {
-		in, out := &in.HostAliases, &out.HostAliases
-		*out = make([]HostAlias, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Priority != nil {
-		in, out := &in.Priority, &out.Priority
-		*out = new(int32)
-		**out = **in
-	}
-	if in.DNSConfig != nil {
-		in, out := &in.DNSConfig, &out.DNSConfig
-		*out = new(PodDNSConfig)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ReadinessGates != nil {
-		in, out := &in.ReadinessGates, &out.ReadinessGates
-		*out = make([]PodReadinessGate, len(*in))
-		copy(*out, *in)
-	}
-	if in.RuntimeClassName != nil {
-		in, out := &in.RuntimeClassName, &out.RuntimeClassName
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
-func (in *PodSpec) DeepCopy() *PodSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(PodSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodStatus) DeepCopyInto(out *PodStatus) {
-	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]PodCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.StartTime != nil {
-		in, out := &in.StartTime, &out.StartTime
-		*out = (*in).DeepCopy()
-	}
-	if in.InitContainerStatuses != nil {
-		in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
-		*out = make([]ContainerStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.ContainerStatuses != nil {
-		in, out := &in.ContainerStatuses, &out.ContainerStatuses
-		*out = make([]ContainerStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
-func (in *PodStatus) DeepCopy() *PodStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PodStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult.
-func (in *PodStatusResult) DeepCopy() *PodStatusResult {
-	if in == nil {
-		return nil
-	}
-	out := new(PodStatusResult)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodStatusResult) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodTemplate) DeepCopyInto(out *PodTemplate) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Template.DeepCopyInto(&out.Template)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate.
-func (in *PodTemplate) DeepCopy() *PodTemplate {
-	if in == nil {
-		return nil
-	}
-	out := new(PodTemplate)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodTemplate) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.ListMeta = in.ListMeta
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]PodTemplate, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList.
-func (in *PodTemplateList) DeepCopy() *PodTemplateList { - if in == nil { - return nil - } - out := new(PodTemplateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTemplateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec. -func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { - if in == nil { - return nil - } - out := new(PodTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource. -func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource { - if in == nil { - return nil - } - out := new(PortworxVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Preconditions) DeepCopyInto(out *Preconditions) { - *out = *in - if in.UID != nil { - in, out := &in.UID, &out.UID - *out = new(types.UID) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. -func (in *Preconditions) DeepCopy() *Preconditions { - if in == nil { - return nil - } - out := new(Preconditions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) { - *out = *in - in.PodSignature.DeepCopyInto(&out.PodSignature) - in.EvictionTime.DeepCopyInto(&out.EvictionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry. -func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry { - if in == nil { - return nil - } - out := new(PreferAvoidPodsEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) { - *out = *in - in.Preference.DeepCopyInto(&out.Preference) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm. -func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { - if in == nil { - return nil - } - out := new(PreferredSchedulingTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Probe) DeepCopyInto(out *Probe) { - *out = *in - in.Handler.DeepCopyInto(&out.Handler) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. 
-func (in *Probe) DeepCopy() *Probe { - if in == nil { - return nil - } - out := new(Probe) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { - *out = *in - if in.Sources != nil { - in, out := &in.Sources, &out.Sources - *out = make([]VolumeProjection, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource. -func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource { - if in == nil { - return nil - } - out := new(ProjectedVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource. -func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { - if in == nil { - return nil - } - out := new(QuobyteVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) { - *out = *in - if in.CephMonitors != nil { - in, out := &in.CephMonitors, &out.CephMonitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource. -func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource { - if in == nil { - return nil - } - out := new(RBDPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { - *out = *in - if in.CephMonitors != nil { - in, out := &in.CephMonitors, &out.CephMonitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource. -func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource { - if in == nil { - return nil - } - out := new(RBDVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. 
-func (in *RangeAllocation) DeepCopy() *RangeAllocation { - if in == nil { - return nil - } - out := new(RangeAllocation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RangeAllocation) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. -func (in *ReplicationController) DeepCopy() *ReplicationController { - if in == nil { - return nil - } - out := new(ReplicationController) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationController) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. -func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { - if in == nil { - return nil - } - out := new(ReplicationControllerCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicationController, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. -func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { - if in == nil { - return nil - } - out := new(ReplicationControllerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(PodTemplateSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. -func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { - if in == nil { - return nil - } - out := new(ReplicationControllerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicationControllerCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. -func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { - if in == nil { - return nil - } - out := new(ReplicationControllerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { - *out = *in - out.Divisor = in.Divisor.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. -func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { - if in == nil { - return nil - } - out := new(ResourceFieldSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ResourceList) DeepCopyInto(out *ResourceList) { - { - in := &in - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. -func (in ResourceList) DeepCopy() ResourceList { - if in == nil { - return nil - } - out := new(ResourceList) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. -func (in *ResourceQuota) DeepCopy() *ResourceQuota { - if in == nil { - return nil - } - out := new(ResourceQuota) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceQuota) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceQuota, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. -func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { - if in == nil { - return nil - } - out := new(ResourceQuotaList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]ResourceQuotaScope, len(*in)) - copy(*out, *in) - } - if in.ScopeSelector != nil { - in, out := &in.ScopeSelector, &out.ScopeSelector - *out = new(ScopeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. -func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { - if in == nil { - return nil - } - out := new(ResourceQuotaSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Used != nil { - in, out := &in.Used, &out.Used - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. -func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { - if in == nil { - return nil - } - out := new(ResourceQuotaStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. -func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { - if in == nil { - return nil - } - out := new(ResourceRequirements) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions. -func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { - if in == nil { - return nil - } - out := new(SELinuxOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. -func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { - if in == nil { - return nil - } - out := new(ScaleIOPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. -func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { - if in == nil { - return nil - } - out := new(ScaleIOVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) { - *out = *in - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]ScopedResourceSelectorRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector. -func (in *ScopeSelector) DeepCopy() *ScopeSelector { - if in == nil { - return nil - } - out := new(ScopeSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement. -func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement { - if in == nil { - return nil - } - out := new(ScopedResourceSelectorRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Secret) DeepCopyInto(out *Secret) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string][]byte, len(*in)) - for key, val := range *in { - var outVal []byte - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]byte, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. -func (in *Secret) DeepCopy() *Secret { - if in == nil { - return nil - } - out := new(Secret) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Secret) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. -func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { - if in == nil { - return nil - } - out := new(SecretEnvSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. -func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { - if in == nil { - return nil - } - out := new(SecretKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretList) DeepCopyInto(out *SecretList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Secret, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. -func (in *SecretList) DeepCopy() *SecretList { - if in == nil { - return nil - } - out := new(SecretList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SecretList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. -func (in *SecretProjection) DeepCopy() *SecretProjection { - if in == nil { - return nil - } - out := new(SecretProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretReference) DeepCopyInto(out *SecretReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. -func (in *SecretReference) DeepCopy() *SecretReference { - if in == nil { - return nil - } - out := new(SecretReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. -func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { - if in == nil { - return nil - } - out := new(SecretVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { - *out = *in - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - *out = new(Capabilities) - (*in).DeepCopyInto(*out) - } - if in.Privileged != nil { - in, out := &in.Privileged, &out.Privileged - *out = new(bool) - **out = **in - } - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.ReadOnlyRootFilesystem != nil { - in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.ProcMount != nil { - in, out := &in.ProcMount, &out.ProcMount - *out = new(ProcMountType) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. 
-func (in *SecurityContext) DeepCopy() *SecurityContext { - if in == nil { - return nil - } - out := new(SecurityContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { - *out = *in - out.TypeMeta = in.TypeMeta - out.Reference = in.Reference - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. -func (in *SerializedReference) DeepCopy() *SerializedReference { - if in == nil { - return nil - } - out := new(SerializedReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SerializedReference) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Service) DeepCopyInto(out *Service) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. -func (in *Service) DeepCopy() *Service { - if in == nil { - return nil - } - out := new(Service) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Service) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. -func (in *ServiceAccount) DeepCopy() *ServiceAccount { - if in == nil { - return nil - } - out := new(ServiceAccount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccount) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceAccount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. -func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { - if in == nil { - return nil - } - out := new(ServiceAccountList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccountList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. -func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { - if in == nil { - return nil - } - out := new(ServiceAccountTokenProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceList) DeepCopyInto(out *ServiceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. -func (in *ServiceList) DeepCopy() *ServiceList { - if in == nil { - return nil - } - out := new(ServiceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServicePort) DeepCopyInto(out *ServicePort) { - *out = *in - out.TargetPort = in.TargetPort - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. -func (in *ServicePort) DeepCopy() *ServicePort { - if in == nil { - return nil - } - out := new(ServicePort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. -func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { - if in == nil { - return nil - } - out := new(ServiceProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ServicePort, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExternalIPs != nil { - in, out := &in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SessionAffinityConfig != nil { - in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig - *out = new(SessionAffinityConfig) - (*in).DeepCopyInto(*out) - } - if in.LoadBalancerSourceRanges != nil { - in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. -func (in *ServiceSpec) DeepCopy() *ServiceSpec { - if in == nil { - return nil - } - out := new(ServiceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { - *out = *in - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. -func (in *ServiceStatus) DeepCopy() *ServiceStatus { - if in == nil { - return nil - } - out := new(ServiceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { - *out = *in - if in.ClientIP != nil { - in, out := &in.ClientIP, &out.ClientIP - *out = new(ClientIPConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. -func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { - if in == nil { - return nil - } - out := new(SessionAffinityConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. -func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. -func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sysctl) DeepCopyInto(out *Sysctl) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. -func (in *Sysctl) DeepCopy() *Sysctl { - if in == nil { - return nil - } - out := new(Sysctl) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { - *out = *in - out.Port = in.Port - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. -func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { - if in == nil { - return nil - } - out := new(TCPSocketAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Taint) DeepCopyInto(out *Taint) { - *out = *in - if in.TimeAdded != nil { - in, out := &in.TimeAdded, &out.TimeAdded - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. -func (in *Taint) DeepCopy() *Taint { - if in == nil { - return nil - } - out := new(Taint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Toleration) DeepCopyInto(out *Toleration) { - *out = *in - if in.TolerationSeconds != nil { - in, out := &in.TolerationSeconds, &out.TolerationSeconds - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. -func (in *Toleration) DeepCopy() *Toleration { - if in == nil { - return nil - } - out := new(Toleration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement. -func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement { - if in == nil { - return nil - } - out := new(TopologySelectorLabelRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) { - *out = *in - if in.MatchLabelExpressions != nil { - in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions - *out = make([]TopologySelectorLabelRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm. -func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { - if in == nil { - return nil - } - out := new(TopologySelectorTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { - *out = *in - if in.APIGroup != nil { - in, out := &in.APIGroup, &out.APIGroup - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference. -func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { - if in == nil { - return nil - } - out := new(TypedLocalObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Volume) DeepCopyInto(out *Volume) { - *out = *in - in.VolumeSource.DeepCopyInto(&out.VolumeSource) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. -func (in *Volume) DeepCopy() *Volume { - if in == nil { - return nil - } - out := new(Volume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. -func (in *VolumeDevice) DeepCopy() *VolumeDevice { - if in == nil { - return nil - } - out := new(VolumeDevice) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { - *out = *in - if in.MountPropagation != nil { - in, out := &in.MountPropagation, &out.MountPropagation - *out = new(MountPropagationMode) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. -func (in *VolumeMount) DeepCopy() *VolumeMount { - if in == nil { - return nil - } - out := new(VolumeMount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { - *out = *in - if in.Required != nil { - in, out := &in.Required, &out.Required - *out = new(NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. -func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { - if in == nil { - return nil - } - out := new(VolumeNodeAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { - *out = *in - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretProjection) - (*in).DeepCopyInto(*out) - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIProjection) - (*in).DeepCopyInto(*out) - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapProjection) - (*in).DeepCopyInto(*out) - } - if in.ServiceAccountToken != nil { - in, out := &in.ServiceAccountToken, &out.ServiceAccountToken - *out = new(ServiceAccountTokenProjection) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. -func (in *VolumeProjection) DeepCopy() *VolumeProjection { - if in == nil { - return nil - } - out := new(VolumeProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { - *out = *in - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - *out = new(EmptyDirVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - *out = new(GitRepoVolumeSource) - **out = **in - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - **out = **in - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(PersistentVolumeClaimVolumeSource) - **out = **in - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - *out = new(QuobyteVolumeSource) - **out = **in - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - **out = **in - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = 
new(AzureFileVolumeSource) - **out = **in - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - if in.Projected != nil { - in, out := &in.Projected, &out.Projected - *out = new(ProjectedVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - *out = new(PortworxVolumeSource) - **out = **in - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - *out = new(ScaleIOVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.StorageOS != nil { - in, out := &in.StorageOS, &out.StorageOS - *out = new(StorageOSVolumeSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. -func (in *VolumeSource) DeepCopy() *VolumeSource { - if in == nil { - return nil - } - out := new(VolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. -func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { - if in == nil { - return nil - } - out := new(VsphereVirtualDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { - *out = *in - in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. 
-func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { - if in == nil { - return nil - } - out := new(WeightedPodAffinityTerm) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/version/BUILD b/vendor/k8s.io/kubernetes/pkg/util/version/BUILD deleted file mode 100644 index 3b53281f..00000000 --- a/vendor/k8s.io/kubernetes/pkg/util/version/BUILD +++ /dev/null @@ -1,35 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "version.go", - ], - importpath = "k8s.io/kubernetes/pkg/util/version", -) - -go_test( - name = "go_default_test", - srcs = ["version_test.go"], - embed = [":go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/k8s.io/utils/LICENSE similarity index 100% rename from vendor/github.com/google/btree/LICENSE rename to vendor/k8s.io/utils/LICENSE diff --git a/vendor/k8s.io/client-go/util/buffer/ring_growing.go b/vendor/k8s.io/utils/buffer/ring_growing.go similarity index 100% rename from vendor/k8s.io/client-go/util/buffer/ring_growing.go rename to vendor/k8s.io/utils/buffer/ring_growing.go diff --git a/vendor/k8s.io/client-go/util/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go similarity index 81% rename from vendor/k8s.io/client-go/util/integer/integer.go rename to vendor/k8s.io/utils/integer/integer.go index c6ea106f..e4e740ca 100644 --- a/vendor/k8s.io/client-go/util/integer/integer.go +++ b/vendor/k8s.io/utils/integer/integer.go @@ -16,6 +16,7 @@ limitations under the License. package integer +// IntMax returns the maximum of the params func IntMax(a, b int) int { if b > a { return b @@ -23,6 +24,7 @@ func IntMax(a, b int) int { return a } +// IntMin returns the minimum of the params func IntMin(a, b int) int { if b < a { return b @@ -30,6 +32,7 @@ func IntMin(a, b int) int { return a } +// Int32Max returns the maximum of the params func Int32Max(a, b int32) int32 { if b > a { return b @@ -37,6 +40,7 @@ func Int32Max(a, b int32) int32 { return a } +// Int32Min returns the minimum of the params func Int32Min(a, b int32) int32 { if b < a { return b @@ -44,6 +48,7 @@ func Int32Min(a, b int32) int32 { return a } +// Int64Max returns the maximum of the params func Int64Max(a, b int64) int64 { if b > a { return b @@ -51,6 +56,7 @@ func Int64Max(a, b int64) int64 { return a } +// Int64Min returns the minimum of the params func Int64Min(a, b int64) int64 { if b < a { return b diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go new file mode 100644 index 00000000..3b424104 --- /dev/null +++ b/vendor/k8s.io/utils/trace/trace.go @@ -0,0 +1,131 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "bytes" + "fmt" + "math/rand" + "time" + + "k8s.io/klog" +) + +// Field is a key value pair that provides additional details about the trace. +type Field struct { + Key string + Value interface{} +} + +func (f Field) format() string { + return fmt.Sprintf("%s:%v", f.Key, f.Value) +} + +func writeFields(b *bytes.Buffer, l []Field) { + for i, f := range l { + b.WriteString(f.format()) + if i < len(l)-1 { + b.WriteString(",") + } + } +} + +type traceStep struct { + stepTime time.Time + msg string + fields []Field +} + +// Trace keeps track of a set of "steps" and allows us to log a specific +// step if it took longer than its share of the total allowed time +type Trace struct { + name string + fields []Field + startTime time.Time + steps []traceStep +} + +// New creates a Trace with the specified name. The name identifies the operation to be traced. The +// Fields add key value pairs to provide additional details about the trace, such as operation inputs. +func New(name string, fields ...Field) *Trace { + return &Trace{name: name, startTime: time.Now(), fields: fields} +} + +// Step adds a new step with a specific message. Call this at the end of an execution step to record +// how long it took. The Fields add key value pairs to provide additional details about the trace +// step. +func (t *Trace) Step(msg string, fields ...Field) { + if t.steps == nil { + // traces almost always have less than 6 steps, do this to avoid more than a single allocation + t.steps = make([]traceStep, 0, 6) + } + t.steps = append(t.steps, traceStep{stepTime: time.Now(), msg: msg, fields: fields}) +} + +// Log is used to dump all the steps in the Trace +func (t *Trace) Log() { + // an explicit logging request should dump all the steps out at the higher level + t.logWithStepThreshold(0) +} + +func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { + var buffer bytes.Buffer + tracenum := rand.Int31() + endTime := time.Now() + + totalTime := endTime.Sub(t.startTime) + buffer.WriteString(fmt.Sprintf("Trace[%d]: %q ", tracenum, t.name)) + if len(t.fields) > 0 { + writeFields(&buffer, t.fields) + buffer.WriteString(" ") + } + buffer.WriteString(fmt.Sprintf("(started: %v) (total time: %v):\n", t.startTime, totalTime)) + lastStepTime := t.startTime + for _, step := range t.steps { + stepDuration := step.stepTime.Sub(lastStepTime) + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { + buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] ", tracenum, step.stepTime.Sub(t.startTime), stepDuration)) + buffer.WriteString(step.msg) + if len(step.fields) > 0 { + buffer.WriteString(" ") + writeFields(&buffer, step.fields) + } + buffer.WriteString("\n") + } + lastStepTime = step.stepTime + } + stepDuration := endTime.Sub(lastStepTime) + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { + buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] END\n", tracenum, endTime.Sub(t.startTime), stepDuration)) + } + + klog.Info(buffer.String()) +} + +// LogIfLong is used to dump steps that took longer than its share +func (t *Trace) LogIfLong(threshold time.Duration) { + if time.Since(t.startTime) >= threshold { + // if any step took more than it's share of the total allowed time, it deserves a higher log level + stepThreshold := threshold / time.Duration(len(t.steps)+1) + t.logWithStepThreshold(stepThreshold) + } +} + +// TotalTime can be used to figure out how 
long it took since the Trace was created +func (t *Trace) TotalTime() time.Duration { + return time.Since(t.startTime) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 3cf457a1..40ab36d5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,113 +4,113 @@ github.com/Sirupsen/logrus github.com/beorn7/perks/quantile # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/ghodss/yaml v1.0.0 -github.com/ghodss/yaml # github.com/gogo/protobuf v1.3.0 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b -github.com/golang/glog # github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 github.com/golang/groupcache/lru # github.com/golang/protobuf v1.3.2 github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/btree v1.0.0 -github.com/google/btree +# github.com/google/go-cmp v0.3.0 +github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags +github.com/google/go-cmp/cmp/internal/function +github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v1.0.0 github.com/google/gofuzz -# github.com/google/uuid v1.0.0 +# github.com/google/uuid v1.1.1 github.com/google/uuid # github.com/googleapis/gnostic v0.3.1 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions -# github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 -github.com/gregjones/httpcache -github.com/gregjones/httpcache/diskcache # github.com/hashicorp/golang-lru v0.5.3 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/json-iterator/go v1.1.7 +# github.com/json-iterator/go v1.1.8 github.com/json-iterator/go -# github.com/kubernetes-incubator/external-storage v5.2.0+incompatible -github.com/kubernetes-incubator/external-storage/lib/controller -github.com/kubernetes-incubator/external-storage/lib/controller/metrics -github.com/kubernetes-incubator/external-storage/lib/util # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/miekg/dns v1.1.27 +github.com/miekg/dns # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 -# github.com/pborman/uuid v1.2.0 -github.com/pborman/uuid -# github.com/peterbourgon/diskv v2.0.1+incompatible -github.com/peterbourgon/diskv # github.com/pkg/errors v0.8.1 github.com/pkg/errors # github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/internal +github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/client_model/go # github.com/prometheus/common v0.6.0 github.com/prometheus/common/expfmt -github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/model # github.com/prometheus/procfs v0.0.3 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs # github.com/urfave/cli v1.19.1 github.com/urfave/cli -# 
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +# golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 +golang.org/x/crypto/ed25519 +golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 -golang.org/x/net/http2 +# golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 +golang.org/x/net/bpf +golang.org/x/net/context +golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts +golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -golang.org/x/net/context/ctxhttp -golang.org/x/net/context +golang.org/x/net/internal/iana +golang.org/x/net/internal/socket +golang.org/x/net/ipv4 +golang.org/x/net/ipv6 # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sys v0.0.0-20190926180325-855e68c8590b golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.3.0 +# golang.org/x/text v0.3.2 golang.org/x/text/secure/bidirule +golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -golang.org/x/text/transform # golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 golang.org/x/time/rate -# google.golang.org/appengine v1.4.0 -google.golang.org/appengine/urlfetch +# google.golang.org/appengine v1.5.0 google.golang.org/appengine/internal -google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/urlfetch # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 -# gopkg.in/yaml.v2 v2.2.2 +# gopkg.in/yaml.v2 v2.2.4 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0-20181005203742-357ec6384fa7 -k8s.io/api/core/v1 -k8s.io/api/storage/v1 -k8s.io/api/storage/v1beta1 -k8s.io/api/admissionregistration/v1alpha1 +# k8s.io/api v0.17.1 +k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 +k8s.io/api/auditregistration/v1alpha1 +k8s.io/api/authentication/v1 +k8s.io/api/authentication/v1beta1 +k8s.io/api/authorization/v1 +k8s.io/api/authorization/v1beta1 k8s.io/api/autoscaling/v1 k8s.io/api/autoscaling/v2beta1 k8s.io/api/autoscaling/v2beta2 @@ -118,84 +118,141 @@ k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 +k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 +k8s.io/api/core/v1 +k8s.io/api/discovery/v1alpha1 +k8s.io/api/discovery/v1beta1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 +k8s.io/api/flowcontrol/v1alpha1 k8s.io/api/networking/v1 +k8s.io/api/networking/v1beta1 +k8s.io/api/node/v1alpha1 +k8s.io/api/node/v1beta1 k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 +k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/settings/v1alpha1 +k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 -k8s.io/api/authentication/v1 -k8s.io/api/authentication/v1beta1 -k8s.io/api/authorization/v1 -k8s.io/api/authorization/v1beta1 -# k8s.io/apimachinery v0.0.0-20180913025736-6dd46049f395 +k8s.io/api/storage/v1beta1 +# k8s.io/apimachinery v0.17.1 k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/util/runtime -k8s.io/apimachinery/pkg/util/uuid -k8s.io/apimachinery/pkg/util/wait 
+k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource -k8s.io/apimachinery/pkg/runtime -k8s.io/apimachinery/pkg/runtime/schema -k8s.io/apimachinery/pkg/types -k8s.io/apimachinery/pkg/util/intstr -k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/v1 +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion +k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields k8s.io/apimachinery/pkg/labels -k8s.io/apimachinery/pkg/selection -k8s.io/apimachinery/pkg/watch -k8s.io/apimachinery/pkg/runtime/serializer/streaming -k8s.io/apimachinery/pkg/util/net -k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/runtime/schema k8s.io/apimachinery/pkg/runtime/serializer -k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/runtime/serializer/json +k8s.io/apimachinery/pkg/runtime/serializer/protobuf +k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/runtime/serializer/streaming +k8s.io/apimachinery/pkg/runtime/serializer/versioning +k8s.io/apimachinery/pkg/selection +k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/clock k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/errors +k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/util/json +k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming +k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/strategicpatch +k8s.io/apimachinery/pkg/util/uuid k8s.io/apimachinery/pkg/util/validation -k8s.io/apimachinery/pkg/conversion/queryparams -k8s.io/apimachinery/pkg/util/errors -k8s.io/apimachinery/pkg/util/json -k8s.io/apimachinery/third_party/forked/golang/reflect +k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/util/version +k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version -k8s.io/apimachinery/pkg/runtime/serializer/json -k8s.io/apimachinery/pkg/runtime/serializer/protobuf -k8s.io/apimachinery/pkg/runtime/serializer/recognizer -k8s.io/apimachinery/pkg/runtime/serializer/versioning -k8s.io/apimachinery/pkg/apis/meta/v1beta1 -k8s.io/apimachinery/pkg/apis/meta/internalversion -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured -k8s.io/apimachinery/pkg/util/mergepatch +k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json -k8s.io/apimachinery/pkg/util/framer -k8s.io/apimachinery/pkg/util/yaml -# k8s.io/client-go v0.0.0-20181005204318-cb4883f3dea0 -k8s.io/client-go/kubernetes -k8s.io/client-go/rest +k8s.io/apimachinery/third_party/forked/golang/reflect +# k8s.io/client-go v0.17.1 +k8s.io/client-go/discovery k8s.io/client-go/informers +k8s.io/client-go/informers/admissionregistration +k8s.io/client-go/informers/admissionregistration/v1 +k8s.io/client-go/informers/admissionregistration/v1beta1 +k8s.io/client-go/informers/apps +k8s.io/client-go/informers/apps/v1 +k8s.io/client-go/informers/apps/v1beta1 +k8s.io/client-go/informers/apps/v1beta2 +k8s.io/client-go/informers/auditregistration +k8s.io/client-go/informers/auditregistration/v1alpha1 +k8s.io/client-go/informers/autoscaling +k8s.io/client-go/informers/autoscaling/v1 +k8s.io/client-go/informers/autoscaling/v2beta1 
+k8s.io/client-go/informers/autoscaling/v2beta2 +k8s.io/client-go/informers/batch +k8s.io/client-go/informers/batch/v1 +k8s.io/client-go/informers/batch/v1beta1 +k8s.io/client-go/informers/batch/v2alpha1 +k8s.io/client-go/informers/certificates +k8s.io/client-go/informers/certificates/v1beta1 +k8s.io/client-go/informers/coordination +k8s.io/client-go/informers/coordination/v1 +k8s.io/client-go/informers/coordination/v1beta1 +k8s.io/client-go/informers/core +k8s.io/client-go/informers/core/v1 +k8s.io/client-go/informers/discovery +k8s.io/client-go/informers/discovery/v1alpha1 +k8s.io/client-go/informers/discovery/v1beta1 +k8s.io/client-go/informers/events +k8s.io/client-go/informers/events/v1beta1 +k8s.io/client-go/informers/extensions +k8s.io/client-go/informers/extensions/v1beta1 +k8s.io/client-go/informers/flowcontrol +k8s.io/client-go/informers/flowcontrol/v1alpha1 +k8s.io/client-go/informers/internalinterfaces +k8s.io/client-go/informers/networking +k8s.io/client-go/informers/networking/v1 +k8s.io/client-go/informers/networking/v1beta1 +k8s.io/client-go/informers/node +k8s.io/client-go/informers/node/v1alpha1 +k8s.io/client-go/informers/node/v1beta1 +k8s.io/client-go/informers/policy +k8s.io/client-go/informers/policy/v1beta1 +k8s.io/client-go/informers/rbac +k8s.io/client-go/informers/rbac/v1 +k8s.io/client-go/informers/rbac/v1alpha1 +k8s.io/client-go/informers/rbac/v1beta1 +k8s.io/client-go/informers/scheduling +k8s.io/client-go/informers/scheduling/v1 +k8s.io/client-go/informers/scheduling/v1alpha1 +k8s.io/client-go/informers/scheduling/v1beta1 +k8s.io/client-go/informers/settings +k8s.io/client-go/informers/settings/v1alpha1 +k8s.io/client-go/informers/storage +k8s.io/client-go/informers/storage/v1 +k8s.io/client-go/informers/storage/v1alpha1 +k8s.io/client-go/informers/storage/v1beta1 +k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/scheme -k8s.io/client-go/kubernetes/typed/core/v1 -k8s.io/client-go/tools/cache -k8s.io/client-go/tools/leaderelection -k8s.io/client-go/tools/leaderelection/resourcelock -k8s.io/client-go/tools/record -k8s.io/client-go/tools/reference -k8s.io/client-go/util/workqueue -k8s.io/client-go/discovery -k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 +k8s.io/client-go/kubernetes/typed/admissionregistration/v1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1beta2 +k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1 k8s.io/client-go/kubernetes/typed/authentication/v1 k8s.io/client-go/kubernetes/typed/authentication/v1beta1 k8s.io/client-go/kubernetes/typed/authorization/v1 @@ -207,84 +264,35 @@ k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/batch/v2alpha1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 +k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 +k8s.io/client-go/kubernetes/typed/core/v1 +k8s.io/client-go/kubernetes/typed/discovery/v1alpha1 +k8s.io/client-go/kubernetes/typed/discovery/v1beta1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 k8s.io/client-go/kubernetes/typed/networking/v1 +k8s.io/client-go/kubernetes/typed/networking/v1beta1 +k8s.io/client-go/kubernetes/typed/node/v1alpha1 
+k8s.io/client-go/kubernetes/typed/node/v1beta1 k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 +k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/settings/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 -k8s.io/client-go/util/flowcontrol -k8s.io/client-go/pkg/version -k8s.io/client-go/plugin/pkg/client/auth/exec -k8s.io/client-go/rest/watch -k8s.io/client-go/tools/clientcmd/api -k8s.io/client-go/tools/metrics -k8s.io/client-go/transport -k8s.io/client-go/util/cert -k8s.io/client-go/informers/admissionregistration -k8s.io/client-go/informers/apps -k8s.io/client-go/informers/autoscaling -k8s.io/client-go/informers/batch -k8s.io/client-go/informers/certificates -k8s.io/client-go/informers/coordination -k8s.io/client-go/informers/core -k8s.io/client-go/informers/events -k8s.io/client-go/informers/extensions -k8s.io/client-go/informers/internalinterfaces -k8s.io/client-go/informers/networking -k8s.io/client-go/informers/policy -k8s.io/client-go/informers/rbac -k8s.io/client-go/informers/scheduling -k8s.io/client-go/informers/settings -k8s.io/client-go/informers/storage -k8s.io/client-go/tools/pager -k8s.io/client-go/util/buffer -k8s.io/client-go/util/retry -k8s.io/client-go/util/integer -k8s.io/client-go/pkg/apis/clientauthentication -k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 -k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 -k8s.io/client-go/util/connrotation -k8s.io/client-go/informers/admissionregistration/v1alpha1 -k8s.io/client-go/informers/admissionregistration/v1beta1 -k8s.io/client-go/informers/apps/v1 -k8s.io/client-go/informers/apps/v1beta1 -k8s.io/client-go/informers/apps/v1beta2 -k8s.io/client-go/informers/autoscaling/v1 -k8s.io/client-go/informers/autoscaling/v2beta1 -k8s.io/client-go/informers/autoscaling/v2beta2 -k8s.io/client-go/informers/batch/v1 -k8s.io/client-go/informers/batch/v1beta1 -k8s.io/client-go/informers/batch/v2alpha1 -k8s.io/client-go/informers/certificates/v1beta1 -k8s.io/client-go/informers/coordination/v1beta1 -k8s.io/client-go/informers/core/v1 -k8s.io/client-go/informers/events/v1beta1 -k8s.io/client-go/informers/extensions/v1beta1 -k8s.io/client-go/informers/networking/v1 -k8s.io/client-go/informers/policy/v1beta1 -k8s.io/client-go/informers/rbac/v1 -k8s.io/client-go/informers/rbac/v1alpha1 -k8s.io/client-go/informers/rbac/v1beta1 -k8s.io/client-go/informers/scheduling/v1alpha1 -k8s.io/client-go/informers/scheduling/v1beta1 -k8s.io/client-go/informers/settings/v1alpha1 -k8s.io/client-go/informers/storage/v1 -k8s.io/client-go/informers/storage/v1alpha1 -k8s.io/client-go/informers/storage/v1beta1 -k8s.io/client-go/listers/admissionregistration/v1alpha1 +k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1beta1 k8s.io/client-go/listers/apps/v1 k8s.io/client-go/listers/apps/v1beta1 k8s.io/client-go/listers/apps/v1beta2 +k8s.io/client-go/listers/auditregistration/v1alpha1 k8s.io/client-go/listers/autoscaling/v1 k8s.io/client-go/listers/autoscaling/v2beta1 k8s.io/client-go/listers/autoscaling/v2beta2 @@ -292,25 +300,63 @@ k8s.io/client-go/listers/batch/v1 k8s.io/client-go/listers/batch/v1beta1 
k8s.io/client-go/listers/batch/v2alpha1 k8s.io/client-go/listers/certificates/v1beta1 +k8s.io/client-go/listers/coordination/v1 k8s.io/client-go/listers/coordination/v1beta1 k8s.io/client-go/listers/core/v1 +k8s.io/client-go/listers/discovery/v1alpha1 +k8s.io/client-go/listers/discovery/v1beta1 k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 +k8s.io/client-go/listers/flowcontrol/v1alpha1 k8s.io/client-go/listers/networking/v1 +k8s.io/client-go/listers/networking/v1beta1 +k8s.io/client-go/listers/node/v1alpha1 +k8s.io/client-go/listers/node/v1beta1 k8s.io/client-go/listers/policy/v1beta1 k8s.io/client-go/listers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 +k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 k8s.io/client-go/listers/settings/v1alpha1 k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 -# k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d +k8s.io/client-go/pkg/apis/clientauthentication +k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 +k8s.io/client-go/pkg/version +k8s.io/client-go/plugin/pkg/client/auth/exec +k8s.io/client-go/rest +k8s.io/client-go/rest/watch +k8s.io/client-go/tools/cache +k8s.io/client-go/tools/clientcmd/api +k8s.io/client-go/tools/leaderelection +k8s.io/client-go/tools/leaderelection/resourcelock +k8s.io/client-go/tools/metrics +k8s.io/client-go/tools/pager +k8s.io/client-go/tools/record +k8s.io/client-go/tools/record/util +k8s.io/client-go/tools/reference +k8s.io/client-go/transport +k8s.io/client-go/util/cert +k8s.io/client-go/util/connrotation +k8s.io/client-go/util/flowcontrol +k8s.io/client-go/util/keyutil +k8s.io/client-go/util/retry +k8s.io/client-go/util/workqueue +# k8s.io/klog v1.0.0 +k8s.io/klog +# k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a k8s.io/kube-openapi/pkg/util/proto -# k8s.io/kubernetes v1.12.1 -k8s.io/kubernetes/pkg/apis/core/v1/helper -k8s.io/kubernetes/pkg/util/version -k8s.io/kubernetes/pkg/apis/core/helper -k8s.io/kubernetes/pkg/apis/core +# k8s.io/utils v0.0.0-20191114184206-e782cd3c129f +k8s.io/utils/buffer +k8s.io/utils/integer +k8s.io/utils/trace +# sigs.k8s.io/sig-storage-lib-external-provisioner v4.0.2-0.20200115000635-36885abbb2bd+incompatible +sigs.k8s.io/sig-storage-lib-external-provisioner/controller +sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics +sigs.k8s.io/sig-storage-lib-external-provisioner/util +# sigs.k8s.io/yaml v1.1.0 +sigs.k8s.io/yaml diff --git a/vendor/github.com/kubernetes-incubator/external-storage/LICENSE b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/LICENSE similarity index 100% rename from vendor/github.com/kubernetes-incubator/external-storage/LICENSE rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/LICENSE diff --git a/vendor/github.com/kubernetes-incubator/external-storage/lib/controller/controller.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/controller.go similarity index 65% rename from vendor/github.com/kubernetes-incubator/external-storage/lib/controller/controller.go rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/controller.go index ac0055fa..5c0c320b 100644 --- a/vendor/github.com/kubernetes-incubator/external-storage/lib/controller/controller.go +++ 
b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/controller.go @@ -28,9 +28,6 @@ import ( "sync" "time" - "github.com/golang/glog" - "github.com/kubernetes-incubator/external-storage/lib/controller/metrics" - "github.com/kubernetes-incubator/external-storage/lib/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "golang.org/x/time/rate" @@ -41,6 +38,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/uuid" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" @@ -52,8 +50,9 @@ import ( "k8s.io/client-go/tools/record" ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/apis/core/v1/helper" - utilversion "k8s.io/kubernetes/pkg/util/version" + glog "k8s.io/klog" + "sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics" + "sigs.k8s.io/sig-storage-lib-external-provisioner/util" ) // annClass annotation represents the storage class associated with a resource: @@ -78,6 +77,14 @@ const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner" // be dynamically provisioned. Its value is the name of the selected node. const annSelectedNode = "volume.kubernetes.io/selected-node" +// This annotation is present on K8s 1.11 release. +const annAlphaSelectedNode = "volume.alpha.kubernetes.io/selected-node" + +// Finalizer for PVs so we know to clean them up +const finalizerPV = "external-provisioner.volume.kubernetes.io/finalizer" + +const uidIndex = "uid" + // ProvisionController is a controller that provisions PersistentVolumes for // PersistentVolumeClaims. type ProvisionController struct { @@ -88,6 +95,10 @@ type ProvisionController struct { // annStorageProvisioner to set & watch for, respectively provisionerName string + // additional provisioner names (beyond provisionerName) that the + // provisioner should watch for and handle in annStorageProvisioner + additionalProvisionerNames []string + // The provisioner the controller will use to provision and delete volumes. // Presumably this implementer of Provisioner carries its own // volume-specific options and such that it needs in order to provision @@ -102,8 +113,8 @@ type ProvisionController struct { // * 1.6: storage classes enter GA kubeVersion *utilversion.Version - claimInformer cache.SharedInformer - claims cache.Store + claimInformer cache.SharedIndexInformer + claimsIndexer cache.Indexer volumeInformer cache.SharedInformer volumes cache.Store classInformer cache.SharedInformer @@ -125,11 +136,14 @@ type ProvisionController struct { resyncPeriod time.Duration + rateLimiter workqueue.RateLimiter exponentialBackOffOnError bool threadiness int + createProvisionedPVBackoff *wait.Backoff createProvisionedPVRetryCount int createProvisionedPVInterval time.Duration + createProvisionerPVLimiter workqueue.RateLimiter failedProvisionThreshold, failedDeleteThreshold int @@ -140,6 +154,11 @@ type ProvisionController struct { // The path of metrics endpoint path. metricsPath string + // Whether to add a finalizer marking the provisioner as the owner of the PV + // with clean up duty. + // TODO: upstream and we may have a race b/w applying reclaim policy and not if pv has protection finalizer + addFinalizer bool + // Whether to do kubernetes leader election at all. 
It should basically // always be done when possible to avoid duplicate Provision attempts. leaderElection bool @@ -149,6 +168,11 @@ type ProvisionController struct { hasRun bool hasRunLock *sync.Mutex + + // Map UID -> *PVC with all claims that may be provisioned in the background. + claimsInProgress sync.Map + + volumeStore VolumeStore } const ( @@ -180,6 +204,8 @@ const ( DefaultMetricsAddress = "0.0.0.0" // DefaultMetricsPath is used when option function MetricsPath is omitted DefaultMetricsPath = "/metrics" + // DefaultAddFinalizer is used when option function AddFinalizer is omitted + DefaultAddFinalizer = false ) var errRuntime = fmt.Errorf("cannot call option functions after controller has Run") @@ -210,6 +236,18 @@ func Threadiness(threadiness int) func(*ProvisionController) error { } } +// RateLimiter is the workqueue.RateLimiter to use for the provisioning and +// deleting work queues. If set, ExponentialBackOffOnError is ignored. +func RateLimiter(rateLimiter workqueue.RateLimiter) func(*ProvisionController) error { + return func(c *ProvisionController) error { + if c.HasRun() { + return errRuntime + } + c.rateLimiter = rateLimiter + return nil + } +} + // ExponentialBackOffOnError determines whether to exponentially back off from // failures of Provision and Delete. Defaults to true. func ExponentialBackOffOnError(exponentialBackOffOnError bool) func(*ProvisionController) error { @@ -224,11 +262,21 @@ func ExponentialBackOffOnError(exponentialBackOffOnError bool) func(*ProvisionCo // CreateProvisionedPVRetryCount is the number of retries when we create a PV // object for a provisioned volume. Defaults to 5. +// If PV is not saved after given number of retries, corresponding storage asset (volume) is deleted! +// Only one of CreateProvisionedPVInterval+CreateProvisionedPVRetryCount or CreateProvisionedPVBackoff or +// CreateProvisionedPVLimiter can be used. +// Deprecated: Use CreateProvisionedPVLimiter instead, it tries indefinitely. func CreateProvisionedPVRetryCount(createProvisionedPVRetryCount int) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { return errRuntime } + if c.createProvisionedPVBackoff != nil { + return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVRetryCount") + } + if c.createProvisionerPVLimiter != nil { + return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVLimiter") + } c.createProvisionedPVRetryCount = createProvisionedPVRetryCount return nil } @@ -236,18 +284,79 @@ func CreateProvisionedPVRetryCount(createProvisionedPVRetryCount int) func(*Prov // CreateProvisionedPVInterval is the interval between retries when we create a // PV object for a provisioned volume. Defaults to 10 seconds. +// If PV is not saved after given number of retries, corresponding storage asset (volume) is deleted! +// Only one of CreateProvisionedPVInterval+CreateProvisionedPVRetryCount or CreateProvisionedPVBackoff or +// CreateProvisionedPVLimiter can be used. +// Deprecated: Use CreateProvisionedPVLimiter instead, it tries indefinitely. 
func CreateProvisionedPVInterval(createProvisionedPVInterval time.Duration) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { return errRuntime } + if c.createProvisionedPVBackoff != nil { + return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVInterval") + } + if c.createProvisionerPVLimiter != nil { + return fmt.Errorf("CreateProvisionedPVInterval cannot be used together with CreateProvisionedPVLimiter") + } c.createProvisionedPVInterval = createProvisionedPVInterval return nil } } +// CreateProvisionedPVBackoff is the configuration of exponential backoff between retries when we create a +// PV object for a provisioned volume. Defaults to linear backoff, 10 seconds 5 times. +// If PV is not saved after given number of retries, corresponding storage asset (volume) is deleted! +// Only one of CreateProvisionedPVInterval+CreateProvisionedPVRetryCount or CreateProvisionedPVBackoff or +// CreateProvisionedPVLimiter can be used. +// Deprecated: Use CreateProvisionedPVLimiter instead, it tries indefinitely. +func CreateProvisionedPVBackoff(backoff wait.Backoff) func(*ProvisionController) error { + return func(c *ProvisionController) error { + if c.HasRun() { + return errRuntime + } + if c.createProvisionedPVRetryCount != 0 { + return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVRetryCount") + } + if c.createProvisionedPVInterval != 0 { + return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVInterval") + } + if c.createProvisionerPVLimiter != nil { + return fmt.Errorf("CreateProvisionedPVBackoff cannot be used together with CreateProvisionedPVLimiter") + } + c.createProvisionedPVBackoff = &backoff + return nil + } +} + +// CreateProvisionedPVLimiter is the configuration of rate limiter for queue of unsaved PersistentVolumes. +// If set, PVs that fail to be saved to Kubernetes API server will be re-enqueued to a separate workqueue +// with this limiter and re-tried until they are saved to API server. There is no limit of retries. +// The main difference to other CreateProvisionedPV* option is that the storage asset is never deleted +// and the controller continues saving PV to API server indefinitely. +// This option cannot be used with CreateProvisionedPVBackoff or CreateProvisionedPVInterval +// or CreateProvisionedPVRetryCount. +func CreateProvisionedPVLimiter(limiter workqueue.RateLimiter) func(*ProvisionController) error { + return func(c *ProvisionController) error { + if c.HasRun() { + return errRuntime + } + if c.createProvisionedPVRetryCount != 0 { + return fmt.Errorf("CreateProvisionedPVLimiter cannot be used together with CreateProvisionedPVRetryCount") + } + if c.createProvisionedPVInterval != 0 { + return fmt.Errorf("CreateProvisionedPVLimiter cannot be used together with CreateProvisionedPVInterval") + } + if c.createProvisionedPVBackoff != nil { + return fmt.Errorf("CreateProvisionedPVLimiter cannot be used together with CreateProvisionedPVBackoff") + } + c.createProvisionerPVLimiter = limiter + return nil + } +} + // FailedProvisionThreshold is the threshold for max number of retries on -// failures of Provision. Defaults to 15. +// failures of Provision. Set to 0 to retry indefinitely. Defaults to 15. 
func FailedProvisionThreshold(failedProvisionThreshold int) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { @@ -259,7 +368,7 @@ func FailedProvisionThreshold(failedProvisionThreshold int) func(*ProvisionContr } // FailedDeleteThreshold is the threshold for max number of retries on failures -// of Delete. Defaults to 15. +// of Delete. Set to 0 to retry indefinitely. Defaults to 15. func FailedDeleteThreshold(failedDeleteThreshold int) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { @@ -334,7 +443,7 @@ func RetryPeriod(retryPeriod time.Duration) func(*ProvisionController) error { // ClaimsInformer sets the informer to use for accessing PersistentVolumeClaims. // Defaults to using a internal informer. -func ClaimsInformer(informer cache.SharedInformer) func(*ProvisionController) error { +func ClaimsInformer(informer cache.SharedIndexInformer) func(*ProvisionController) error { return func(c *ProvisionController) error { if c.HasRun() { return errRuntime @@ -406,6 +515,30 @@ func MetricsPath(metricsPath string) func(*ProvisionController) error { } } +// AdditionalProvisionerNames sets additional names for the provisioner +func AdditionalProvisionerNames(additionalProvisionerNames []string) func(*ProvisionController) error { + return func(c *ProvisionController) error { + if c.HasRun() { + return errRuntime + } + c.additionalProvisionerNames = additionalProvisionerNames + return nil + } +} + +// AddFinalizer determines whether to add a finalizer marking the provisioner +// as the owner of the PV with clean up duty. A PV having the finalizer means +// the provisioner wants to keep it around so that it can reclaim it. +func AddFinalizer(addFinalizer bool) func(*ProvisionController) error { + return func(c *ProvisionController) error { + if c.HasRun() { + return errRuntime + } + c.addFinalizer = addFinalizer + return nil + } +} + // HasRun returns whether the controller has Run func (ctrl *ProvisionController) HasRun() bool { ctrl.hasRunLock.Lock() @@ -437,48 +570,55 @@ func NewProvisionController( eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component}) controller := &ProvisionController{ - client: client, - provisionerName: provisionerName, - provisioner: provisioner, - kubeVersion: utilversion.MustParseSemantic(kubeVersion), - id: id, - component: component, - eventRecorder: eventRecorder, - resyncPeriod: DefaultResyncPeriod, - exponentialBackOffOnError: DefaultExponentialBackOffOnError, - threadiness: DefaultThreadiness, - createProvisionedPVRetryCount: DefaultCreateProvisionedPVRetryCount, - createProvisionedPVInterval: DefaultCreateProvisionedPVInterval, - failedProvisionThreshold: DefaultFailedProvisionThreshold, - failedDeleteThreshold: DefaultFailedDeleteThreshold, - leaderElection: DefaultLeaderElection, - leaderElectionNamespace: getInClusterNamespace(), - leaseDuration: DefaultLeaseDuration, - renewDeadline: DefaultRenewDeadline, - retryPeriod: DefaultRetryPeriod, - metricsPort: DefaultMetricsPort, - metricsAddress: DefaultMetricsAddress, - metricsPath: DefaultMetricsPath, - hasRun: false, - hasRunLock: &sync.Mutex{}, + client: client, + provisionerName: provisionerName, + provisioner: provisioner, + kubeVersion: utilversion.MustParseSemantic(kubeVersion), + id: id, + component: component, + eventRecorder: eventRecorder, + resyncPeriod: DefaultResyncPeriod, + exponentialBackOffOnError: DefaultExponentialBackOffOnError, + threadiness: DefaultThreadiness, + 
failedProvisionThreshold: DefaultFailedProvisionThreshold, + failedDeleteThreshold: DefaultFailedDeleteThreshold, + leaderElection: DefaultLeaderElection, + leaderElectionNamespace: getInClusterNamespace(), + leaseDuration: DefaultLeaseDuration, + renewDeadline: DefaultRenewDeadline, + retryPeriod: DefaultRetryPeriod, + metricsPort: DefaultMetricsPort, + metricsAddress: DefaultMetricsAddress, + metricsPath: DefaultMetricsPath, + addFinalizer: DefaultAddFinalizer, + hasRun: false, + hasRunLock: &sync.Mutex{}, } for _, option := range options { - option(controller) + err := option(controller) + if err != nil { + glog.Fatalf("Error processing controller options: %s", err) + } } - ratelimiter := workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(15*time.Second, 1000*time.Second), - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, - ) - if !controller.exponentialBackOffOnError { - ratelimiter = workqueue.NewMaxOfRateLimiter( + var rateLimiter workqueue.RateLimiter + if controller.rateLimiter != nil { + // rateLimiter set via parameter takes precedence + rateLimiter = controller.rateLimiter + } else if controller.exponentialBackOffOnError { + rateLimiter = workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(15*time.Second, 1000*time.Second), + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ) + } else { + rateLimiter = workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(15*time.Second, 15*time.Second), &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } - controller.claimQueue = workqueue.NewNamedRateLimitingQueue(ratelimiter, "claims") - controller.volumeQueue = workqueue.NewNamedRateLimitingQueue(ratelimiter, "volumes") + controller.claimQueue = workqueue.NewNamedRateLimitingQueue(rateLimiter, "claims") + controller.volumeQueue = workqueue.NewNamedRateLimitingQueue(rateLimiter, "volumes") informer := informers.NewSharedInformerFactory(client, controller.resyncPeriod) @@ -486,9 +626,12 @@ func NewProvisionController( // PersistentVolumeClaims claimHandler := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) }, - UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) }, - DeleteFunc: func(obj interface{}) { controller.forgetWork(controller.claimQueue, obj) }, + AddFunc: func(obj interface{}) { controller.enqueueClaim(obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueClaim(newObj) }, + DeleteFunc: func(obj interface{}) { + // NOOP. 
The claim is either in claimsInProgress and in the queue, so it will be processed as usual + // or it's not in claimsInProgress and then we don't care + }, } if controller.claimInformer != nil { @@ -497,15 +640,22 @@ func NewProvisionController( controller.claimInformer = informer.Core().V1().PersistentVolumeClaims().Informer() controller.claimInformer.AddEventHandler(claimHandler) } - controller.claims = controller.claimInformer.GetStore() + controller.claimInformer.AddIndexers(cache.Indexers{uidIndex: func(obj interface{}) ([]string, error) { + uid, err := getObjectUID(obj) + if err != nil { + return nil, err + } + return []string{uid}, nil + }}) + controller.claimsIndexer = controller.claimInformer.GetIndexer() // ----------------- // PersistentVolumes volumeHandler := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) }, - UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) }, - DeleteFunc: func(obj interface{}) { controller.forgetWork(controller.volumeQueue, obj) }, + AddFunc: func(obj interface{}) { controller.enqueueVolume(obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueVolume(newObj) }, + DeleteFunc: func(obj interface{}) { controller.forgetVolume(obj) }, } if controller.volumeInformer != nil { @@ -529,12 +679,63 @@ func NewProvisionController( } controller.classes = controller.classInformer.GetStore() + if controller.createProvisionerPVLimiter != nil { + glog.V(2).Infof("Using saving PVs to API server in background") + controller.volumeStore = NewVolumeStoreQueue(client, controller.createProvisionerPVLimiter, controller.claimsIndexer, controller.eventRecorder) + } else { + if controller.createProvisionedPVBackoff == nil { + // Use linear backoff with createProvisionedPVInterval and createProvisionedPVRetryCount by default. + if controller.createProvisionedPVInterval == 0 { + controller.createProvisionedPVInterval = DefaultCreateProvisionedPVInterval + } + if controller.createProvisionedPVRetryCount == 0 { + controller.createProvisionedPVRetryCount = DefaultCreateProvisionedPVRetryCount + } + controller.createProvisionedPVBackoff = &wait.Backoff{ + Duration: controller.createProvisionedPVInterval, + Factor: 1, // linear backoff + Steps: controller.createProvisionedPVRetryCount, + //Cap: controller.createProvisionedPVInterval, + } + } + glog.V(2).Infof("Using blocking saving PVs to API server") + controller.volumeStore = NewBackoffStore(client, controller.eventRecorder, controller.createProvisionedPVBackoff, controller) + } + return controller } -// enqueueWork takes an obj and converts it into a namespace/name string which +func getObjectUID(obj interface{}) (string, error) { + var object metav1.Object + var ok bool + if object, ok = obj.(metav1.Object); !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + return "", fmt.Errorf("error decoding object, invalid type") + } + object, ok = tombstone.Obj.(metav1.Object) + if !ok { + return "", fmt.Errorf("error decoding object tombstone, invalid type") + } + } + return string(object.GetUID()), nil +} + +// enqueueClaim takes an obj and converts it into UID that is then put onto claim work queue. 
+func (ctrl *ProvisionController) enqueueClaim(obj interface{}) { + uid, err := getObjectUID(obj) + if err != nil { + utilruntime.HandleError(err) + return + } + if ctrl.claimQueue.NumRequeues(uid) == 0 { + ctrl.claimQueue.Add(uid) + } +} + +// enqueueVolume takes an obj and converts it into a namespace/name string which // is then put onto the given work queue. -func (ctrl *ProvisionController) enqueueWork(queue workqueue.RateLimitingInterface, obj interface{}) { +func (ctrl *ProvisionController) enqueueVolume(obj interface{}) { var key string var err error if key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err != nil { @@ -543,22 +744,22 @@ func (ctrl *ProvisionController) enqueueWork(queue workqueue.RateLimitingInterfa } // Re-Adding is harmless but try to add it to the queue only if it is not // already there, because if it is already there we *must* be retrying it - if queue.NumRequeues(key) == 0 { - queue.Add(key) + if ctrl.volumeQueue.NumRequeues(key) == 0 { + ctrl.volumeQueue.Add(key) } } -// forgetWork Forgets an obj from the given work queue, telling the queue to +// forgetVolume Forgets an obj from the given work queue, telling the queue to // stop tracking its retries because e.g. the obj was deleted -func (ctrl *ProvisionController) forgetWork(queue workqueue.RateLimitingInterface, obj interface{}) { +func (ctrl *ProvisionController) forgetVolume(obj interface{}) { var key string var err error if key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err != nil { utilruntime.HandleError(err) return } - queue.Forget(key) - queue.Done(key) + ctrl.volumeQueue.Forget(key) + ctrl.volumeQueue.Done(key) } // Run starts all of this controller's control loops @@ -623,11 +824,14 @@ func (ctrl *ProvisionController) Run(_ <-chan struct{}) { select {} } + go ctrl.volumeStore.Run(context.TODO(), DefaultThreadiness) + if ctrl.leaderElection { rl, err := resourcelock.New("endpoints", ctrl.leaderElectionNamespace, strings.Replace(ctrl.provisionerName, "/", "-", -1), ctrl.client.CoreV1(), + nil, resourcelock.ResourceLockConfig{ Identity: ctrl.id, EventRecorder: ctrl.eventRecorder, @@ -682,11 +886,16 @@ func (ctrl *ProvisionController) processNextClaimWorkItem() bool { } if err := ctrl.syncClaimHandler(key); err != nil { - if ctrl.claimQueue.NumRequeues(obj) < ctrl.failedProvisionThreshold { + if ctrl.failedProvisionThreshold == 0 { + glog.Warningf("Retrying syncing claim %q, failure %v", key, ctrl.claimQueue.NumRequeues(obj)) + ctrl.claimQueue.AddRateLimited(obj) + } else if ctrl.claimQueue.NumRequeues(obj) < ctrl.failedProvisionThreshold { glog.Warningf("Retrying syncing claim %q because failures %v < threshold %v", key, ctrl.claimQueue.NumRequeues(obj), ctrl.failedProvisionThreshold) ctrl.claimQueue.AddRateLimited(obj) } else { glog.Errorf("Giving up syncing claim %q because failures %v >= threshold %v", key, ctrl.claimQueue.NumRequeues(obj), ctrl.failedProvisionThreshold) + glog.V(2).Infof("Removing PVC %s from claims in progress", key) + ctrl.claimsInProgress.Delete(key) // This can leak a volume that's being provisioned in the background! // Done but do not Forget: it will not be in the queue but NumRequeues // will be saved until the obj is deleted from kubernetes } @@ -694,6 +903,9 @@ func (ctrl *ProvisionController) processNextClaimWorkItem() bool { } ctrl.claimQueue.Forget(obj) + // Silently remove the PVC from list of volumes in progress. The provisioning either succeeded + // or the PVC was ignored by this provisioner. 
+ ctrl.claimsInProgress.Delete(key) return nil }(obj) @@ -723,11 +935,14 @@ func (ctrl *ProvisionController) processNextVolumeWorkItem() bool { } if err := ctrl.syncVolumeHandler(key); err != nil { - if ctrl.volumeQueue.NumRequeues(obj) < ctrl.failedDeleteThreshold { - glog.Warningf("Retrying syncing volume %q because failures %v < threshold %v", key, ctrl.volumeQueue.NumRequeues(obj), ctrl.failedProvisionThreshold) + if ctrl.failedDeleteThreshold == 0 { + glog.Warningf("Retrying syncing volume %q, failure %v", key, ctrl.volumeQueue.NumRequeues(obj)) + ctrl.volumeQueue.AddRateLimited(obj) + } else if ctrl.volumeQueue.NumRequeues(obj) < ctrl.failedDeleteThreshold { + glog.Warningf("Retrying syncing volume %q because failures %v < threshold %v", key, ctrl.volumeQueue.NumRequeues(obj), ctrl.failedDeleteThreshold) ctrl.volumeQueue.AddRateLimited(obj) } else { - glog.Errorf("Giving up syncing volume %q because failures %v >= threshold %v", key, ctrl.volumeQueue.NumRequeues(obj), ctrl.failedProvisionThreshold) + glog.Errorf("Giving up syncing volume %q because failures %v >= threshold %v", key, ctrl.volumeQueue.NumRequeues(obj), ctrl.failedDeleteThreshold) // Done but do not Forget: it will not be in the queue but NumRequeues // will be saved until the obj is deleted from kubernetes } @@ -748,15 +963,21 @@ func (ctrl *ProvisionController) processNextVolumeWorkItem() bool { // syncClaimHandler gets the claim from informer's cache then calls syncClaim func (ctrl *ProvisionController) syncClaimHandler(key string) error { - claimObj, exists, err := ctrl.claims.GetByKey(key) + objs, err := ctrl.claimsIndexer.ByIndex(uidIndex, key) if err != nil { return err } - if !exists { - utilruntime.HandleError(fmt.Errorf("claim %q in work queue no longer exists", key)) - return nil + var claimObj interface{} + if len(objs) > 0 { + claimObj = objs[0] + } else { + obj, found := ctrl.claimsInProgress.Load(key) + if !found { + utilruntime.HandleError(fmt.Errorf("claim %q in work queue no longer exists", key)) + return nil + } + claimObj = obj } - return ctrl.syncClaim(claimObj) } @@ -782,10 +1003,34 @@ func (ctrl *ProvisionController) syncClaim(obj interface{}) error { return fmt.Errorf("expected claim but got %+v", obj) } - if ctrl.shouldProvision(claim) { + should, err := ctrl.shouldProvision(claim) + if err != nil { + return err + } else if should { startTime := time.Now() - err := ctrl.provisionClaimOperation(claim) + + status, err := ctrl.provisionClaimOperation(claim) ctrl.updateProvisionStats(claim, err, startTime) + if err == nil || status == ProvisioningFinished { + // Provisioning is 100% finished / not in progress. + if err == nil { + glog.V(5).Infof("Claim processing succeeded, removing PVC %s from claims in progress", claim.UID) + } else { + glog.V(2).Infof("Final error received, removing PVC %s from claims in progress", claim.UID) + } + ctrl.claimsInProgress.Delete(string(claim.UID)) + return err + } + if status == ProvisioningInBackground { + // Provisioning is in progress in background. + glog.V(2).Infof("Temporary error received, adding PVC %s to claims in progress", claim.UID) + ctrl.claimsInProgress.Store(string(claim.UID), claim) + } else { + // status == ProvisioningNoChange. + // Don't change claimsInProgress: + // - the claim is already there if previous status was ProvisioningInBackground. + // - the claim is not there if if previous status was ProvisioningFinished. 
+ } return err } return nil @@ -807,52 +1052,73 @@ func (ctrl *ProvisionController) syncVolume(obj interface{}) error { return nil } +// knownProvisioner checks if provisioner name has been +// configured to provision volumes for +func (ctrl *ProvisionController) knownProvisioner(provisioner string) bool { + if provisioner == ctrl.provisionerName { + return true + } + for _, p := range ctrl.additionalProvisionerNames { + if p == provisioner { + return true + } + } + return false +} + // shouldProvision returns whether a claim should have a volume provisioned for // it, i.e. whether a Provision is "desired" -func (ctrl *ProvisionController) shouldProvision(claim *v1.PersistentVolumeClaim) bool { +func (ctrl *ProvisionController) shouldProvision(claim *v1.PersistentVolumeClaim) (bool, error) { if claim.Spec.VolumeName != "" { - return false + return false, nil } if qualifier, ok := ctrl.provisioner.(Qualifier); ok { if !qualifier.ShouldProvision(claim) { - return false + return false, nil } } // Kubernetes 1.5 provisioning with annStorageProvisioner if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.5.0")) { if provisioner, found := claim.Annotations[annStorageProvisioner]; found { - if provisioner == ctrl.provisionerName { - return true + if ctrl.knownProvisioner(provisioner) { + return true, nil } - return false } } else { // Kubernetes 1.4 provisioning, evaluating class.Provisioner - claimClass := helper.GetPersistentVolumeClaimClass(claim) - provisioner, _, err := ctrl.getStorageClassFields(claimClass) + claimClass := util.GetPersistentVolumeClaimClass(claim) + class, err := ctrl.getStorageClass(claimClass) if err != nil { glog.Errorf("Error getting claim %q's StorageClass's fields: %v", claimToClaimKey(claim), err) - return false + return false, err } - if provisioner != ctrl.provisionerName { - return false + if class.Provisioner != ctrl.provisionerName { + return false, nil } - return true + return true, nil } - return false + return false, nil } // shouldDelete returns whether a volume should have its backing volume // deleted, i.e. whether a Delete is "desired" func (ctrl *ProvisionController) shouldDelete(volume *v1.PersistentVolume) bool { + if deletionGuard, ok := ctrl.provisioner.(DeletionGuard); ok { + if !deletionGuard.ShouldDelete(volume) { + return false + } + } + // In 1.9+ PV protection means the object will exist briefly with a // deletion timestamp even after our successful Delete. Ignore it. if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.9.0")) { - if volume.ObjectMeta.DeletionTimestamp != nil { + if ctrl.addFinalizer && !ctrl.checkFinalizer(volume, finalizerPV) && volume.ObjectMeta.DeletionTimestamp != nil { + return false + } else if volume.ObjectMeta.DeletionTimestamp != nil { return false } } @@ -894,6 +1160,15 @@ func (ctrl *ProvisionController) canProvision(claim *v1.PersistentVolumeClaim) e return nil } +func (ctrl *ProvisionController) checkFinalizer(volume *v1.PersistentVolume, finalizer string) bool { + for _, f := range volume.ObjectMeta.Finalizers { + if f == finalizer { + return true + } + } + return false +} + func (ctrl *ProvisionController) updateProvisionStats(claim *v1.PersistentVolumeClaim, err error, startTime time.Time) { class := "" if claim.Spec.StorageClassName != nil { @@ -920,9 +1195,9 @@ func (ctrl *ProvisionController) updateDeleteStats(volume *v1.PersistentVolume, // provisionClaimOperation attempts to provision a volume for the given claim. 
// Returns error, which indicates whether provisioning should be retried // (requeue the claim) or not -func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) error { +func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) (ProvisioningState, error) { // Most code here is identical to that found in controller.go of kube's PV controller... - claimClass := helper.GetPersistentVolumeClaimClass(claim) + claimClass := util.GetPersistentVolumeClaimClass(claim) operation := fmt.Sprintf("provision %q class %q", claimToClaimKey(claim), claimClass) glog.Info(logOperation(operation, "started")) @@ -934,7 +1209,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol if err == nil && volume != nil { // Volume has been already provisioned, nothing to do. glog.Info(logOperation(operation, "persistentvolume %q already exists, skipping", pvName)) - return nil + return ProvisioningFinished, nil } // Prepare a claimRef to the claim early (to fail before a volume is @@ -942,86 +1217,68 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol claimRef, err := ref.GetReference(scheme.Scheme, claim) if err != nil { glog.Error(logOperation(operation, "unexpected error getting claim reference: %v", err)) - return nil - } - - provisioner, parameters, err := ctrl.getStorageClassFields(claimClass) - if err != nil { - glog.Error(logOperation(operation, "error getting claim's StorageClass's fields: %v", err)) - return nil - } - if provisioner != ctrl.provisionerName { - // class.Provisioner has either changed since shouldProvision() or - // annDynamicallyProvisioned contains different provisioner than - // class.Provisioner. - glog.Error(logOperation(operation, "unknown provisioner %q requested in claim's StorageClass", provisioner)) - return nil + return ProvisioningNoChange, nil } // Check if this provisioner can provision this claim. if err = ctrl.canProvision(claim); err != nil { ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error()) glog.Error(logOperation(operation, "failed to provision volume: %v", err)) - return nil + return ProvisioningFinished, nil } - reclaimPolicy := v1.PersistentVolumeReclaimDelete - if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.8.0")) { - reclaimPolicy, err = ctrl.fetchReclaimPolicy(claimClass) - if err != nil { - return err - } - } - - mountOptions, err := ctrl.fetchMountOptions(claimClass) + // For any issues getting fields from StorageClass (including reclaimPolicy & mountOptions), + // retry the claim because the storageClass can be fixed/(re)created independently of the claim + class, err := ctrl.getStorageClass(claimClass) if err != nil { - return err + glog.Error(logOperation(operation, "error getting claim's StorageClass's fields: %v", err)) + return ProvisioningFinished, err + } + if !ctrl.knownProvisioner(class.Provisioner) { + // class.Provisioner has either changed since shouldProvision() or + // annDynamicallyProvisioned contains different provisioner than + // class.Provisioner. 
+ glog.Error(logOperation(operation, "unknown provisioner %q requested in claim's StorageClass", class.Provisioner)) + return ProvisioningFinished, nil } var selectedNode *v1.Node - var allowedTopologies []v1.TopologySelectorTerm if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.11.0")) { // Get SelectedNode - if nodeName, ok := claim.Annotations[annSelectedNode]; ok { + if nodeName, ok := getString(claim.Annotations, annSelectedNode, annAlphaSelectedNode); ok { selectedNode, err = ctrl.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) // TODO (verult) cache Nodes if err != nil { err = fmt.Errorf("failed to get target node: %v", err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error()) - return err + return ProvisioningNoChange, err } } - - // Get AllowedTopologies - allowedTopologies, err = ctrl.fetchAllowedTopologies(claimClass) - if err != nil { - err = fmt.Errorf("failed to get AllowedTopologies from StorageClass: %v", err) - ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error()) - return err - } } - options := VolumeOptions{ - PersistentVolumeReclaimPolicy: reclaimPolicy, - PVName: pvName, - PVC: claim, - MountOptions: mountOptions, - Parameters: parameters, - SelectedNode: selectedNode, - AllowedTopologies: allowedTopologies, + options := ProvisionOptions{ + StorageClass: class, + PVName: pvName, + PVC: claim, + SelectedNode: selectedNode, } ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "Provisioning", fmt.Sprintf("External provisioner is provisioning volume for claim %q", claimToClaimKey(claim))) - volume, err = ctrl.provisioner.Provision(options) + result := ProvisioningFinished + if p, ok := ctrl.provisioner.(ProvisionerExt); ok { + volume, result, err = p.ProvisionExt(options) + } else { + volume, err = ctrl.provisioner.Provision(options) + } if err != nil { if ierr, ok := err.(*IgnoredError); ok { // Provision ignored, do nothing and hope another provisioner will provision it. 
glog.Info(logOperation(operation, "volume provision ignored: %v", ierr)) - return nil + return ProvisioningFinished, nil } err = fmt.Errorf("failed to provision volume with StorageClass %q: %v", claimClass, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error()) - return err + return result, err } glog.Info(logOperation(operation, "volume %q provisioned", volume.Name)) @@ -1029,6 +1286,11 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol // Set ClaimRef and the PV controller will bind and set annBoundByController for us volume.Spec.ClaimRef = claimRef + // Add external provisioner finalizer if it doesn't already have it + if ctrl.addFinalizer && !ctrl.checkFinalizer(volume, finalizerPV) { + volume.ObjectMeta.Finalizers = append(volume.ObjectMeta.Finalizers, finalizerPV) + } + metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, ctrl.provisionerName) if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.6.0")) { volume.Spec.StorageClassName = claimClass @@ -1036,58 +1298,12 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annClass, claimClass) } - // Try to create the PV object several times - for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { - glog.Info(logOperation(operation, "trying to save persistentvolume %q", volume.Name)) - if _, err = ctrl.client.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { - // Save succeeded. - if err != nil { - glog.Info(logOperation(operation, "persistentvolume %q already exists, reusing", volume.Name)) - err = nil - } else { - glog.Info(logOperation(operation, "persistentvolume %q saved", volume.Name)) - } - break - } - // Save failed, try again after a while. - glog.Info(logOperation(operation, "failed to save persistentvolume %q: %v", volume.Name, err)) - time.Sleep(ctrl.createProvisionedPVInterval) - } - - if err != nil { - // Save failed. Now we have a storage asset outside of Kubernetes, - // but we don't have appropriate PV object for it. - // Emit some event here and try to delete the storage asset several - // times. - strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), err) - glog.Error(logOperation(operation, strerr)) - ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", strerr) - - for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { - if err = ctrl.provisioner.Delete(volume); err == nil { - // Delete succeeded - glog.Info(logOperation(operation, "cleaning volume %q succeeded", volume.Name)) - break - } - // Delete failed, try again after a while. - glog.Info(logOperation(operation, "failed to clean volume %q: %v", volume.Name, err)) - time.Sleep(ctrl.createProvisionedPVInterval) - } + glog.Info(logOperation(operation, "succeeded")) - if err != nil { - // Delete failed several times. There is an orphaned volume and there - // is nothing we can do about it. - strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. 
Please delete manually.", claimToClaimKey(claim), err) - glog.Error(logOperation(operation, strerr)) - ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningCleanupFailed", strerr) - } - } else { - msg := fmt.Sprintf("Successfully provisioned volume %s", volume.Name) - ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "ProvisioningSucceeded", msg) + if err := ctrl.volumeStore.StoreVolume(claim, volume); err != nil { + return ProvisioningFinished, err } - - glog.Info(logOperation(operation, "succeeded")) - return nil + return ProvisioningFinished, nil } // deleteVolumeOperation attempts to delete the volume backing the given @@ -1133,6 +1349,42 @@ func (ctrl *ProvisionController) deleteVolumeOperation(volume *v1.PersistentVolu return err } + if ctrl.addFinalizer { + if len(newVolume.ObjectMeta.Finalizers) > 0 { + // Remove external-provisioner finalizer + + // need to get the pv again because the delete has updated the object with a deletion timestamp + newVolume, err := ctrl.client.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) + if err != nil { + // If the volume is not found return, otherwise error + if !apierrs.IsNotFound(err) { + glog.Info(logOperation(operation, "failed to get persistentvolume to update finalizer: %v", err)) + return err + } + return nil + } + finalizers := make([]string, 0) + for _, finalizer := range newVolume.ObjectMeta.Finalizers { + if finalizer != finalizerPV { + finalizers = append(finalizers, finalizer) + } + } + + // Only update the finalizers if we actually removed something + if len(finalizers) != len(newVolume.ObjectMeta.Finalizers) { + newVolume.ObjectMeta.Finalizers = finalizers + if _, err = ctrl.client.CoreV1().PersistentVolumes().Update(newVolume); err != nil { + if !apierrs.IsNotFound(err) { + // Couldn't remove finalizer and the object still exists, the controller may + // try to remove the finalizer again on the next update + glog.Info(logOperation(operation, "failed to remove finalizer for persistentvolume: %v", err)) + return err + } + } + } + } + } + glog.Info(logOperation(operation, "persistentvolume deleted")) glog.Info(logOperation(operation, "succeeded")) @@ -1165,86 +1417,36 @@ func (ctrl *ProvisionController) getProvisionedVolumeNameForClaim(claim *v1.Pers return "pvc-" + string(claim.UID) } -func (ctrl *ProvisionController) getStorageClassFields(name string) (string, map[string]string, error) { +// getStorageClass retrives storage class object by name. +func (ctrl *ProvisionController) getStorageClass(name string) (*storage.StorageClass, error) { classObj, found, err := ctrl.classes.GetByKey(name) - if err != nil { - return "", nil, err - } - if !found { - return "", nil, fmt.Errorf("storageClass %q not found", name) - // 3. It tries to find a StorageClass instance referenced by annotation - // `claim.Annotations["volume.beta.kubernetes.io/storage-class"]`. If not - // found, it SHOULD report an error (by sending an event to the claim) and it - // SHOULD retry periodically with step i. 
- } - switch class := classObj.(type) { - case *storage.StorageClass: - return class.Provisioner, class.Parameters, nil - case *storagebeta.StorageClass: - return class.Provisioner, class.Parameters, nil - } - return "", nil, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj) -} - -func claimToClaimKey(claim *v1.PersistentVolumeClaim) string { - return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name) -} - -func (ctrl *ProvisionController) fetchReclaimPolicy(storageClassName string) (v1.PersistentVolumeReclaimPolicy, error) { - classObj, found, err := ctrl.classes.GetByKey(storageClassName) - if err != nil { - return "", err - } - if !found { - return "", fmt.Errorf("storageClass %q not found", storageClassName) - } - - switch class := classObj.(type) { - case *storage.StorageClass: - return *class.ReclaimPolicy, nil - case *storagebeta.StorageClass: - return *class.ReclaimPolicy, nil - } - - return v1.PersistentVolumeReclaimDelete, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj) -} - -func (ctrl *ProvisionController) fetchMountOptions(storageClassName string) ([]string, error) { - classObj, found, err := ctrl.classes.GetByKey(storageClassName) if err != nil { return nil, err } if !found { - return nil, fmt.Errorf("storageClass %q not found", storageClassName) + return nil, fmt.Errorf("storageClass %q not found", name) } - switch class := classObj.(type) { case *storage.StorageClass: - return class.MountOptions, nil + return class, nil case *storagebeta.StorageClass: - return class.MountOptions, nil + // convert storagebeta.StorageClass to storage.StorageClass + return &storage.StorageClass{ + ObjectMeta: class.ObjectMeta, + Provisioner: class.Provisioner, + Parameters: class.Parameters, + ReclaimPolicy: class.ReclaimPolicy, + MountOptions: class.MountOptions, + AllowVolumeExpansion: class.AllowVolumeExpansion, + VolumeBindingMode: (*storage.VolumeBindingMode)(class.VolumeBindingMode), + AllowedTopologies: class.AllowedTopologies, + }, nil } - return nil, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj) } -func (ctrl *ProvisionController) fetchAllowedTopologies(storageClassName string) ([]v1.TopologySelectorTerm, error) { - classObj, found, err := ctrl.classes.GetByKey(storageClassName) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("storageClass %q not found", storageClassName) - } - - switch class := classObj.(type) { - case *storage.StorageClass: - return class.AllowedTopologies, nil - case *storagebeta.StorageClass: - return class.AllowedTopologies, nil - } - - return nil, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj) +func claimToClaimKey(claim *v1.PersistentVolumeClaim) string { + return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name) } // supportsBlock returns whether a provisioner supports block volume. @@ -1256,3 +1458,16 @@ func (ctrl *ProvisionController) supportsBlock() bool { } return false } + +func getString(m map[string]string, key string, alts ...string) (string, bool) { + if m == nil { + return "", false + } + keys := append([]string{key}, alts...) 
+ for _, k := range keys { + if v, ok := m[k]; ok { + return v, true + } + } + return "", false +} diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/doc.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/doc.go new file mode 100644 index 00000000..f1db463b --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller // import "sigs.k8s.io/sig-storage-lib-external-provisioner/controller" diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/doc.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/doc.go new file mode 100644 index 00000000..eac1f359 --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics // import "sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics" diff --git a/vendor/github.com/kubernetes-incubator/external-storage/lib/controller/metrics/metrics.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/metrics.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/external-storage/lib/controller/metrics/metrics.go rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/metrics/metrics.go diff --git a/vendor/github.com/kubernetes-incubator/external-storage/lib/controller/volume.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume.go similarity index 50% rename from vendor/github.com/kubernetes-incubator/external-storage/lib/controller/volume.go rename to vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume.go index 92db4103..ef69550a 100644 --- a/vendor/github.com/kubernetes-incubator/external-storage/lib/controller/volume.go +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/api/core/v1" + storageapis "k8s.io/api/storage/v1" ) // Provisioner is an interface that creates templates for PersistentVolumes @@ -29,7 +30,7 @@ import ( type Provisioner interface { // Provision creates a volume i.e. 
the storage asset and returns a PV object // for the volume - Provision(VolumeOptions) (*v1.PersistentVolume, error) + Provision(ProvisionOptions) (*v1.PersistentVolume, error) // Delete removes the storage asset that was created by Provision backing the // given PV. Does not delete the PV object itself. // @@ -47,6 +48,13 @@ type Qualifier interface { ShouldProvision(*v1.PersistentVolumeClaim) bool } +// DeletionGuard is an optional interface implemented by provisioners to determine +// whether a PV should be deleted. +type DeletionGuard interface { + // ShouldDelete returns whether deleting the PV should be attempted. + ShouldDelete(volume *v1.PersistentVolume) bool +} + // BlockProvisioner is an optional interface implemented by provisioners to determine // whether it supports block volume. type BlockProvisioner interface { @@ -55,6 +63,42 @@ type BlockProvisioner interface { SupportsBlock() bool } +// ProvisionerExt is an optional interface implemented by provisioners that +// can return enhanced error code from provisioner. +type ProvisionerExt interface { + // ProvisionExt creates a volume i.e. the storage asset and returns a PV object + // for the volume. The provisioner can return an error (e.g. timeout) and state + // ProvisioningInBackground to tell the controller that provisioning may be in + // progress after ProvisionExt() finishes. The controller will call ProvisionExt() + // again with the same parameters, assuming that the provisioner continues + // provisioning the volume. The provisioner must return either final error (with + // ProvisioningFinished) or success eventually, otherwise the controller will try + // forever (unless FailedProvisionThreshold is set). + ProvisionExt(options ProvisionOptions) (*v1.PersistentVolume, ProvisioningState, error) +} + +// ProvisioningState is state of volume provisioning. It tells the controller if +// provisioning could be in progress in the background after ProvisionExt() call +// returns or the provisioning is 100% finished (either with success or error). +type ProvisioningState string + +const ( + // ProvisioningInBackground tells the controller that provisioning may be in + // progress in background after ProvisionExt call finished. + ProvisioningInBackground ProvisioningState = "Background" + // ProvisioningFinished tells the controller that provisioning for sure does + // not continue in background, error code of ProvisionExt() is final. + ProvisioningFinished ProvisioningState = "Finished" + // ProvisioningNoChange tells the controller that provisioning state is the same as + // before the call - either ProvisioningInBackground or ProvisioningFinished from + // the previous ProvisionExt(). This state is typically returned by a provisioner + // before it could reach storage backend - the provisioner could not check status + // of provisioning and previous state applies. If this state is returned from the + // first ProvisionExt call, ProvisioningFinished is assumed (the provisioning + // could not even start). + ProvisioningNoChange ProvisioningState = "NoChange" +) + // IgnoredError is the value for Delete to return to indicate that the call has // been ignored and no action taken. 
In case multiple provisioners are serving // the same storage class, provisioners may ignore PVs they are not responsible @@ -68,28 +112,22 @@ func (e *IgnoredError) Error() string { return fmt.Sprintf("ignored because %s", e.Reason) } -// VolumeOptions contains option information about a volume -// https://github.com/kubernetes/kubernetes/blob/release-1.4/pkg/volume/plugins.go -type VolumeOptions struct { - // Reclamation policy for a persistent volume - PersistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy +// ProvisionOptions contains all information required to provision a volume +type ProvisionOptions struct { + // StorageClass is a reference to the storage class that is used for + // provisioning for this volume + StorageClass *storageapis.StorageClass + // PV.Name of the appropriate PersistentVolume. Used to generate cloud // volume name. PVName string - // PV mount options. Not validated - mount of the PVs will simply fail if one is invalid. - MountOptions []string - // PVC is reference to the claim that lead to provisioning of a new PV. // Provisioners *must* create a PV that would be matched by this PVC, // i.e. with required capacity, accessMode, labels matching PVC.Selector and // so on. PVC *v1.PersistentVolumeClaim - // Volume provisioning parameters from StorageClass - Parameters map[string]string // Node selected by the scheduler for the volume. SelectedNode *v1.Node - // Topology constraint parameter from StorageClass - AllowedTopologies []v1.TopologySelectorTerm } diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume_store.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume_store.go new file mode 100644 index 00000000..d535f2ac --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/controller/volume_store.go @@ -0,0 +1,269 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "sync" + "time" + + "k8s.io/client-go/tools/record" + + v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +// VolumeStore is an interface that's used to save PersistentVolumes to API server. +// Implementation of the interface add custom error recovery policy. +// A volume is added via StoreVolume(). It's enough to store the volume only once. +// It is not possible to remove a volume, even when corresponding PVC is deleted +// and PV is not necessary any longer. PV will be always created. +// If corresponding PVC is deleted, the PV will be deleted by Kubernetes using +// standard deletion procedure. It saves us some code here. +type VolumeStore interface { + // StoreVolume makes sure a volume is saved to Kubernetes API server. 
+ // If no error is returned, caller can assume that PV was saved or + // is being saved in background. + // In error is returned, no PV was saved and corresponding PVC needs + // to be re-queued (so whole provisioning needs to be done again). + StoreVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) error + + // Runs any background goroutines for implementation of the interface. + Run(ctx context.Context, threadiness int) +} + +// queueStore is implementation of VolumeStore that re-tries saving +// PVs to API server using a workqueue running in its own goroutine(s). +// After failed save, volume is re-qeueued with exponential backoff. +type queueStore struct { + client kubernetes.Interface + queue workqueue.RateLimitingInterface + eventRecorder record.EventRecorder + claimsIndexer cache.Indexer + + volumes sync.Map +} + +var _ VolumeStore = &queueStore{} + +// NewVolumeStoreQueue returns VolumeStore that uses asynchronous workqueue to save PVs. +func NewVolumeStoreQueue( + client kubernetes.Interface, + limiter workqueue.RateLimiter, + claimsIndexer cache.Indexer, + eventRecorder record.EventRecorder, +) VolumeStore { + + return &queueStore{ + client: client, + queue: workqueue.NewNamedRateLimitingQueue(limiter, "unsavedpvs"), + claimsIndexer: claimsIndexer, + eventRecorder: eventRecorder, + } +} + +func (q *queueStore) StoreVolume(_ *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) error { + if err := q.doSaveVolume(volume); err != nil { + q.volumes.Store(volume.Name, volume) + q.queue.Add(volume.Name) + klog.Errorf("Failed to save volume %s: %s", volume.Name, err) + } + // Consume any error, this Store will retry in background. + return nil +} + +func (q *queueStore) Run(ctx context.Context, threadiness int) { + klog.Infof("Starting save volume queue") + defer q.queue.ShutDown() + + for i := 0; i < threadiness; i++ { + go wait.Until(q.saveVolumeWorker, time.Second, ctx.Done()) + } + <-ctx.Done() + klog.Infof("Stopped save volume queue") +} + +func (q *queueStore) saveVolumeWorker() { + for q.processNextWorkItem() { + } +} + +func (q *queueStore) processNextWorkItem() bool { + obj, shutdown := q.queue.Get() + defer q.queue.Done(obj) + + if shutdown { + return false + } + + var volumeName string + var ok bool + if volumeName, ok = obj.(string); !ok { + q.queue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in save workqueue but got %#v", obj)) + return true + } + + volumeObj, found := q.volumes.Load(volumeName) + if !found { + q.queue.Forget(volumeName) + utilruntime.HandleError(fmt.Errorf("did not find saved volume %s", volumeName)) + return true + } + + volume, ok := volumeObj.(*v1.PersistentVolume) + if !ok { + q.queue.Forget(volumeName) + utilruntime.HandleError(fmt.Errorf("saved object is not volume: %+v", volumeObj)) + return true + } + + if err := q.doSaveVolume(volume); err != nil { + q.queue.AddRateLimited(volumeName) + utilruntime.HandleError(err) + klog.V(5).Infof("Volume %s enqueued", volume.Name) + return true + } + q.volumes.Delete(volumeName) + q.queue.Forget(volumeName) + return true +} + +func (q *queueStore) doSaveVolume(volume *v1.PersistentVolume) error { + klog.V(5).Infof("Saving volume %s", volume.Name) + _, err := q.client.CoreV1().PersistentVolumes().Create(volume) + if err == nil || apierrs.IsAlreadyExists(err) { + klog.V(5).Infof("Volume %s saved", volume.Name) + q.sendSuccessEvent(volume) + return nil + } + return fmt.Errorf("error saving volume %s: %s", volume.Name, err) +} + +func (q *queueStore) 
sendSuccessEvent(volume *v1.PersistentVolume) { + claimObjs, err := q.claimsIndexer.ByIndex(uidIndex, string(volume.Spec.ClaimRef.UID)) + if err != nil { + klog.V(2).Infof("Error sending event to claim %s: %s", volume.Spec.ClaimRef.UID, err) + return + } + if len(claimObjs) != 1 { + return + } + claim, ok := claimObjs[0].(*v1.PersistentVolumeClaim) + if !ok { + return + } + msg := fmt.Sprintf("Successfully provisioned volume %s", volume.Name) + q.eventRecorder.Event(claim, v1.EventTypeNormal, "ProvisioningSucceeded", msg) +} + +// backoffStore is implementation of VolumeStore that blocks and tries to save +// a volume to API server with configurable backoff. If saving fails, +// StoreVolume() deletes the storage asset in the end and returns appropriate +// error code. +type backoffStore struct { + client kubernetes.Interface + eventRecorder record.EventRecorder + backoff *wait.Backoff + ctrl *ProvisionController +} + +var _ VolumeStore = &backoffStore{} + +// NewBackoffStore returns VolumeStore that uses blocking exponential backoff to save PVs. +func NewBackoffStore(client kubernetes.Interface, + eventRecorder record.EventRecorder, + backoff *wait.Backoff, + ctrl *ProvisionController, +) VolumeStore { + return &backoffStore{ + client: client, + eventRecorder: eventRecorder, + backoff: backoff, + ctrl: ctrl, + } +} + +func (b *backoffStore) StoreVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) error { + // Try to create the PV object several times + var lastSaveError error + err := wait.ExponentialBackoff(*b.backoff, func() (bool, error) { + klog.Infof("Trying to save persistentvolume %q", volume.Name) + var err error + if _, err = b.client.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { + // Save succeeded. + if err != nil { + klog.Infof("persistentvolume %q already exists, reusing", volume.Name) + } else { + klog.Infof("persistentvolume %q saved", volume.Name) + } + return true, nil + } + // Save failed, try again after a while. + klog.Infof("Failed to save persistentvolume %q: %v", volume.Name, err) + lastSaveError = err + return false, nil + }) + + if err == nil { + // Save succeeded + msg := fmt.Sprintf("Successfully provisioned volume %s", volume.Name) + b.eventRecorder.Event(claim, v1.EventTypeNormal, "ProvisioningSucceeded", msg) + return nil + } + + // Save failed. Now we have a storage asset outside of Kubernetes, + // but we don't have appropriate PV object for it. + // Emit some event here and try to delete the storage asset several + // times. + strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), lastSaveError) + klog.Error(strerr) + b.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", strerr) + + var lastDeleteError error + err = wait.ExponentialBackoff(*b.backoff, func() (bool, error) { + if err = b.ctrl.provisioner.Delete(volume); err == nil { + // Delete succeeded + klog.Infof("Cleaning volume %q succeeded", volume.Name) + return true, nil + } + // Delete failed, try again after a while. + klog.Infof("Failed to clean volume %q: %v", volume.Name, err) + lastDeleteError = err + return false, nil + }) + if err != nil { + // Delete failed several times. There is an orphaned volume and there + // is nothing we can do about it. + strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. 
Please delete manually.", claimToClaimKey(claim), lastDeleteError) + klog.Error(strerr) + b.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningCleanupFailed", strerr) + } + + return lastSaveError +} + +func (b *backoffStore) Run(ctx context.Context, threadiness int) { + // There is not background processing +} diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/doc.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/doc.go new file mode 100644 index 00000000..3697f686 --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util // import "sigs.k8s.io/sig-storage-lib-external-provisioner/util" diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/util.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/util.go new file mode 100644 index 00000000..9cf7cbd8 --- /dev/null +++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/util/util.go @@ -0,0 +1,159 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "github.com/miekg/dns" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + glog "k8s.io/klog" + "net" +) + +// Common allocation units +const ( + KiB int64 = 1024 + MiB int64 = 1024 * KiB + GiB int64 = 1024 * MiB + TiB int64 = 1024 * GiB +) + +// RoundUpSize calculates how many allocation units are needed to accommodate +// a volume of given size. E.g. 
when user wants 1500MiB volume, while AWS EBS +// allocates volumes in gibibyte-sized chunks, +// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2' +// (2 GiB is the smallest allocatable volume that can hold 1500MiB) +func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { + return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes +} + +// RoundUpToGiB rounds up given quantity upto chunks of GiB +func RoundUpToGiB(sizeBytes int64) int64 { + return RoundUpSize(sizeBytes, GiB) +} + +// AccessModesContains returns whether the requested mode is contained by modes +func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// AccessModesContainedInAll returns whether all of the requested modes are contained by modes +func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool { + for _, mode := range requestedModes { + if !AccessModesContains(indexedModes, mode) { + return false + } + } + return true +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// CheckPersistentVolumeClaimModeBlock checks VolumeMode. +// If the mode is Block, return true otherwise return false. +func CheckPersistentVolumeClaimModeBlock(pvc *v1.PersistentVolumeClaim) bool { + return pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock +} + +// FindDNSIP looks up the cluster DNS service by label "coredns", falling back to "kube-dns" if not found +func FindDNSIP(client kubernetes.Interface) (dnsip string) { + // find DNS server address through client API + // cache result in rbdProvisioner + var dnssvc *v1.Service + coredns, err := client.CoreV1().Services(metav1.NamespaceSystem).Get("coredns", metav1.GetOptions{}) + if err != nil { + glog.Warningf("error getting coredns service: %v. 
Falling back to kube-dns\n", err) + kubedns, err := client.CoreV1().Services(metav1.NamespaceSystem).Get("kube-dns", metav1.GetOptions{}) + if err != nil { + glog.Errorf("error getting kube-dns service: %v\n", err) + return "" + } + dnssvc = kubedns + } else { + dnssvc = coredns + } + if len(dnssvc.Spec.ClusterIP) == 0 { + glog.Errorf("DNS service ClusterIP bad\n") + return "" + } + return dnssvc.Spec.ClusterIP +} + +// LookupHost looks up IP addresses of hostname on specified DNS server +func LookupHost(hostname string, serverip string) (iplist []string, err error) { + glog.V(4).Infof("lookuphost %q on %q\n", hostname, serverip) + m := new(dns.Msg) + m.SetQuestion(dns.Fqdn(hostname), dns.TypeA) + in, err := dns.Exchange(m, JoinHostPort(serverip, "53")) + if err != nil { + glog.Errorf("dns lookup of %q failed: err %v", hostname, err) + return nil, err + } + for _, a := range in.Answer { + glog.V(4).Infof("lookuphost answer: %v\n", a) + if t, ok := a.(*dns.A); ok { + iplist = append(iplist, t.A.String()) + } + } + return iplist, nil +} + +// SplitHostPort split a string into host and port (port is optional) +func SplitHostPort(hostport string) (host, port string) { + host, port, err := net.SplitHostPort(hostport) + if err != nil { + host, port = hostport, "" + } + return host, port +} + +// JoinHostPort joins a hostname and an optional port +func JoinHostPort(host, port string) (hostport string) { + if port != "" { + return net.JoinHostPort(host, port) + } + return host +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore similarity index 100% rename from vendor/github.com/ghodss/yaml/.gitignore rename to vendor/sigs.k8s.io/yaml/.gitignore diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml new file mode 100644 index 00000000..03ddc731 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/.travis.yml @@ -0,0 +1,14 @@ +language: go +dist: xenial +go: + - 1.9.x + - 1.10.x + - 1.11.x +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) + - go tool vet . + - go test -v -race ./... +install: + - go get golang.org/x/lint/golint diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md new file mode 100644 index 00000000..de471151 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + + diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE similarity index 100% rename from vendor/github.com/ghodss/yaml/LICENSE rename to vendor/sigs.k8s.io/yaml/LICENSE diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS new file mode 100644 index 00000000..11ad7ce1 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -0,0 +1,25 @@ +approvers: +- dims +- lavalamp +- smarterclayton +- deads2k +- sttts +- liggitt +- caesarxuchao +reviewers: +- dims +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- gmarek +- sttts +- ncdc +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md similarity index 100% rename from vendor/github.com/ghodss/yaml/README.md rename to vendor/sigs.k8s.io/yaml/README.md diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md new file mode 100644 index 00000000..6b642464 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `yaml` Project is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS new file mode 100644 index 00000000..0648a8eb --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS @@ -0,0 +1,17 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +jessfraz +liggitt +philips +tallclair diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md new file mode 100644 index 00000000..0d15c00c --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go similarity index 99% rename from vendor/github.com/ghodss/yaml/fields.go rename to vendor/sigs.k8s.io/yaml/fields.go index 58600740..235b7f2c 100644 --- a/vendor/github.com/ghodss/yaml/fields.go +++ b/vendor/sigs.k8s.io/yaml/fields.go @@ -1,6 +1,7 @@ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package yaml import ( diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go similarity index 77% rename from vendor/github.com/ghodss/yaml/yaml.go rename to vendor/sigs.k8s.io/yaml/yaml.go index 4fb4054a..02459611 100644 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -4,13 +4,14 @@ import ( "bytes" "encoding/json" "fmt" + "io" "reflect" "strconv" "gopkg.in/yaml.v2" ) -// Marshals the object into JSON then converts JSON to YAML and returns the +// Marshal marshals the object into JSON then converts JSON to YAML and returns the // YAML. func Marshal(o interface{}) ([]byte, error) { j, err := json.Marshal(o) @@ -26,15 +27,35 @@ func Marshal(o interface{}) ([]byte, error) { return y, nil } -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { +// JSONOpt is a decoding option for decoding from JSON format. +type JSONOpt func(*json.Decoder) *json.Decoder + +// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, +// optionally configuring the behavior of the JSON unmarshal. +func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { + return yamlUnmarshal(y, o, false, opts...) +} + +// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal +// into an object, optionally configuring the behavior of the JSON unmarshal. +func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { + return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) +} + +// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, +// optionally performing the unmarshalling strictly +func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) + unmarshalFn := yaml.Unmarshal + if strict { + unmarshalFn = yaml.UnmarshalStrict + } + j, err := yamlToJSON(y, &vo, unmarshalFn) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } - err = json.Unmarshal(j, o) + err = jsonUnmarshal(bytes.NewReader(j), o, opts...) if err != nil { return fmt.Errorf("error unmarshaling JSON: %v", err) } @@ -42,7 +63,22 @@ func Unmarshal(y []byte, o interface{}) error { return nil } -// Convert JSON to YAML. +// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the +// object, optionally applying decoder options prior to decoding. 
We are not +// using json.Unmarshal directly as we want the chance to pass in non-default +// options. +func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(r) + for _, opt := range opts { + d = opt(d) + } + if err := d.Decode(&o); err != nil { + return fmt.Errorf("while decoding JSON: %v", err) + } + return nil +} + +// JSONToYAML Converts JSON to YAML. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} @@ -60,8 +96,8 @@ func JSONToYAML(j []byte) ([]byte, error) { return yaml.Marshal(jsonObj) } -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. +// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, +// passing JSON through this method should be a no-op. // // Things YAML can do that are not supported by JSON: // * In YAML you can have binary and null keys in your maps. These are invalid @@ -70,14 +106,22 @@ func JSONToYAML(j []byte) ([]byte, error) { // use binary data with this library, encode the data as base64 as usual but do // not use the !!binary tag in your YAML. This will ensure the original base64 // encoded data makes it all the way through to the JSON. +// +// For strict decoding of YAML, use YAMLToJSONStrict. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) + return yamlToJSON(y, nil, yaml.Unmarshal) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { +// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, +// returning an error on any duplicate field names. +func YAMLToJSONStrict(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.UnmarshalStrict) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) + err := yamlUnmarshal(y, &yamlObj) if err != nil { return nil, err } @@ -272,6 +316,4 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in } return yamlObj, nil } - - return nil, nil } diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go new file mode 100644 index 00000000..ab3e06a2 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. + +// +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} -- GitLab
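The getString helper added to controller.go above looks up a map value under a preferred key and then under alternative keys, in order. A minimal illustration follows; it is not part of the patch, the key names and the lookupFSType wrapper are hypothetical, and the snippet is written as it might appear inside the controller package because getString is unexported.

// lookupFSType illustrates getString's fallback behaviour: the preferred key
// is tried first, then each alternative key in the order given.
func lookupFSType(params map[string]string) string {
	if v, ok := getString(params, "fsType", "fstype"); ok {
		return v
	}
	// Hypothetical default when neither key is present.
	return "ext4"
}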
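The ProvisionerExt interface and the ProvisioningState values introduced in controller/volume.go above let a provisioner tell the controller whether provisioning may still be running in the background. The sketch below is not part of the patch and shows only one way such an implementation could look: the hostPathProvisioner and fakeBackend types and errTimeout are invented for the example, and a real provisioner would also implement the Provisioner interface's Provision and Delete methods.

package example

import (
	"errors"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/sig-storage-lib-external-provisioner/controller"
)

// errTimeout marks a case where the backend may still be provisioning the
// asset after the call returned.
var errTimeout = errors.New("backend timed out")

// fakeBackend stands in for whatever storage system actually creates assets.
type fakeBackend struct{}

func (fakeBackend) CreateVolume(name string, params map[string]string) (string, error) {
	return "/exports/" + name, nil
}

// hostPathProvisioner is a hypothetical provisioner used only for this sketch.
type hostPathProvisioner struct {
	backend fakeBackend
}

// ProvisionExt returns ProvisioningInBackground on a timeout so the controller
// calls back later with the same claim and PV name, and ProvisioningFinished
// when the outcome (success or error) is final.
func (p *hostPathProvisioner) ProvisionExt(options controller.ProvisionOptions) (*v1.PersistentVolume, controller.ProvisioningState, error) {
	// Class parameters now travel inside options.StorageClass rather than the
	// flat fields of the removed VolumeOptions.
	path, err := p.backend.CreateVolume(options.PVName, options.StorageClass.Parameters)
	if err != nil {
		if errors.Is(err, errTimeout) {
			return nil, controller.ProvisioningInBackground, err
		}
		return nil, controller.ProvisioningFinished, err
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: options.PVName},
		Spec: v1.PersistentVolumeSpec{
			AccessModes: options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceStorage: options.PVC.Spec.Resources.Requests[v1.ResourceStorage],
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{Path: path},
			},
		},
	}
	return pv, controller.ProvisioningFinished, nil
}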
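Finally, the options-aware Unmarshal and the strict path added to sigs.k8s.io/yaml above (UnmarshalStrict combined with the go1.10+ DisallowUnknownFields option) can be exercised as in this small, self-contained sketch; the Config struct and the sample document are made up for the example and are not taken from the patch.

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// Config is a hypothetical target type; the yaml package converts YAML to
// JSON first, so the json tags drive field matching.
type Config struct {
	Replicas int    `json:"replicas"`
	Image    string `json:"image"`
}

func main() {
	doc := []byte("replicas: 3\nimage: nginx\nunknownField: true\n")

	var c Config
	// Plain Unmarshal silently drops the unknown key.
	if err := yaml.Unmarshal(doc, &c); err != nil {
		fmt.Println("unmarshal error:", err)
	}
	fmt.Printf("%+v\n", c) // {Replicas:3 Image:nginx}

	// UnmarshalStrict appends DisallowUnknownFields, so the same document is
	// rejected because of unknownField; duplicate YAML keys are also rejected
	// because the strict path decodes with gopkg.in/yaml.v2's UnmarshalStrict.
	if err := yaml.UnmarshalStrict(doc, &c); err != nil {
		fmt.Println("strict unmarshal error:", err)
	}
}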